# -*- coding: utf-8 -*-
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from config import path
options = Options()
options.headless = True
driver = webdriver.Chrome(path, chrome_options=options)
class YouTube(object):
"""Text"""
def __init__(self):
pass
def search_videos(self, user_text):
self.user_text = user_text
main_url = "https://www.youtube.com/results?search_query=" + user_text
driver.get(main_url)
sleep(1)
videos = driver.find_elements_by_id("video-title")
url = []
for i in range(len(videos)):
url.append(videos[i].get_attribute('href'))
if i == 10:
break
return url
def search_videos_from_channel(self, user_url):
self.user_url = user_url
driver.get(user_url + "/videos")
videos = driver.find_elements_by_id("video-title")
url = []
for i in range(len(videos)):
url.append(videos[i].get_attribute('href'))
if i == 10:
break
return url
def search_name_channel(self, user_url):
self.user_url = user_url
driver.get(user_url)
channel_name = driver.find_element_by_id("channel-name")
return channel_name.text
def search_youtube_trends(self):
url = "https://www.youtube.com/feed/trending/"
driver.get(url)
videos = driver.find_elements_by_id("thumbnail")
urls = []
for i in range(len(videos)):
urls.append(videos[i].get_attribute('href'))
if i == 10:
break
return urls
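# Example usage (a working chromedriver at the path imported from config and
# network access are required; YouTube's markup changes often, so the element
# ids used above may need updating):
#
#     yt = YouTube()
#     search_links = yt.search_videos("python tutorials")
#     trending_links = yt.search_youtube_trends()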
|
import os
from eval import predict_captions
import sys
import json
from compare import comp as match_results
from getKeyphrase import getKeyPhrase
import argparse
from readxml import events
import shutil
def final_evaluation(folder='test_data'):
print("=========start generate image captions=========")
image_captions=predict_captions(folder)
#print image_captions
print("=========start generate news keyphrases=========")
keyphrases=getKeyPhrase(folder)
#print keyphrases
#events=
res=match_results(image_captions,keyphrases,events)
return res
'''
def generate_folder(dir='final_matching_results',res,folder='test_data'):
if not os.path.exists(dir):
os.makedirs(dir)
os.chdir(dir)
for i in range(len(res)):
new_dir="match"+str(i)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
os.chdir('../'+folder)
shutil.copy(, '../'+dir+'/match'+str(i))
'''
if __name__=="__main__":
#os.chdir("ImageCaptionNoCUDA")
parser=argparse.ArgumentParser()
parser.add_argument('--new_dataset',type=str,default='0',
help='1 represents new dataset, 0 represents provided dataset')
parser.add_argument('--image_folder', type=str, default='',
help='If this is nonempty then will predict on the images in this folder path')
args=parser.parse_args()
if(args.new_dataset=='0'):
final_evaluation()
elif(args.new_dataset=='1'):
res=final_evaluation(args.image_folder)
|
"""
d1lod.interface
A high-level wrapper around a d1lod.graph that implements a variety
of d1lod-specific methods.
The most common method that will be called is addDataset(). This method
takes care of the work of adding the dataset, its digital objects, and its
people and organizations to the graph. All statements for a dataset are
accumulated into a temporary Redland RDF.Model with in-memory storage. When
all the triples for a dataset are accumulated, those triples are converted into
a SPARQL UPDATE query string and passed to the Graph to be inserted into
the graph database.
Aside from the basic methods (count, exists, etc), a general pattern is followed
for method naming of having separate methods such as addDataset and
addDatasetTriples (note the addition of the 'Triples' to the name). This pattern
is used to separate concerns, where the former is concerned with higher-level
issues such as whether or not a dataset should be added in the first place and
the latter is concerned with adding the triples for that dataset to the graph.
http://docs.s4.ontotext.com/display/S4docs/Fully+Managed+Database#FullyManagedDatabase-cURL%28dataupload%29
"""
import urllib
import uuid
import re
import RDF
import logging
import dataone, validator, util
from d1lod.people import processing
# Default namespaces
NAMESPACES = {
'owl': 'http://www.w3.org/2002/07/owl#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'xsd': 'http://www.w3.org/2001/XMLSchema#',
'foaf': 'http://xmlns.com/foaf/0.1/',
'dcterms': 'http://purl.org/dc/terms/',
'datacite': 'http://purl.org/spar/datacite/',
'prov': 'http://www.w3.org/ns/prov#',
'geolink': 'http://schema.geolink.org/1.0/base/main#',
'd1dataset': 'http://dataone.org/dataset/',
'd1person': 'http://dataone.org/person/',
'd1org': 'http://dataone.org/organization/',
'd1node': 'https://cn.dataone.org/cn/v1/node/'
}
class Interface:
def __init__(self, graph):
"""Initialize a graph with the given name.
Parameters:
-----------
graph : str
The name of the graph.
"""
self.graph = graph
# Load the formats map
self.formats = util.loadFormatsMap()
# Set up the temporary model which accumulates triples when addDataset
# is called
self.model = None
# Synchronize the newly added namespaces to the Graph object
# for faster referencing
self.graph.ns = NAMESPACES
# Add fixed statements
#
# Note: These are inserted regardless of whether or not they already
# exist
prov = self.graph.ns['prov']
owl = self.graph.ns['owl']
self.graph.insert(RDF.Uri(prov+'wasRevisionOf'), RDF.Uri(owl+'inverseOf'), RDF.Uri(prov+'hadRevision'))
def __str__(self):
return "Interface to Graph: '%s'." % self.graph.name
def prepareTerm(self, term):
"""Prepare an RDF term to be added to an RDF Model.
A term is either:
- An RDF.Node
- An RDF.Uri
- A string, which is either:
- A binding string (e.g., '?s')
- A URI reference (e.g., 'rdf:type')
- A URI (e.g., http://...)
- A literal
If the term is a str with a namespace prefix that the Interface knows
about then that namespace will be interpolated prior to making the term
into an RDF.Uri.
Arguments:
term : str | RDF.Node | RDF.Uri
The RDF term (subject, predicate, or object) to be prepared.
Returns:
str | RDF.Node | RDF.Uri
"""
if isinstance(term, RDF.Uri) or isinstance(term, RDF.Node):
return term
elif isinstance(term, str) or isinstance(term, unicode):
# Binding?: Do nothing
if term.startswith('?'):
return term
# Convert 'http...' strings to RDF.Uri
if term.startswith('http'):
return RDF.Uri(term)
parts = term.split(':')
# URI
if len(parts) > 1 and parts[0] in self.graph.ns:
prefix = self.graph.ns[parts[0]]
other_parts = parts[1:]
term = RDF.Uri(prefix + ':'.join(other_parts))
else:
# Literal
term = RDF.Node(term)
else:
raise Exception("Invalid term sent can't be prepared: (type is %s) Term is `%s`." % (type(term), term))
return term
def createModel(self):
"""Creates a Redland RDF Model.
Returns:
RDF.Model
"""
storage = RDF.HashStorage("temp", options="new='yes',hash-type='memory'")
if storage is None:
raise Exception("new RDF.Storage failed")
model = RDF.Model(storage)
if model is None:
raise Exception("new RDF.model failed")
self.model = model
def insertModel(self):
"""Inserts the current RDF Model (if it exists) into the graph and
deletes it if successful.
Returns: None
"""
if self.model is None:
logging.info("Attempted to insert a model that was None.")
return
# checking for all the statements in the current model
# if either subject / object / predicate is a blank node - then indicate that the payload contains a blank node
blank_node = False
for s in self.model:
blank_node = self.tripleHasBlankNode(s.subject, s.predicate, s.object)
if blank_node == True:
break
sparql_data = " .\n ".join([str(s) for s in self.model])
# Log model size
logging.info('Inserting model of size %d.', self.model.size())
self.graph.insert_data(payload=sparql_data, blank_node=blank_node)
return
def add(self, s, p, o):
"""Adds a triple to the current model.
Performs pre-processing on the subject, predicate, and object to
convert each one into an RDF.Node or RDF.Uri if it isn't already.
Pre-processing will perform namespace interpolation, e.g. if
s=='foo:Bar' and the namespace 'foo' exists and is http://foo.com/, the pre-processing step will convert s to
RDF.Uri('http://foo.com/Bar').
Parameters:
-----------
s : RDF.Node | str
The subject of the triple pattern.
p : RDF.Node | str
The predicate of the triple pattern.
o : RDF.Node | str
The object of the triple pattern.
Examples:
---------
add(RDF.Uri('http://example.com/#me'), 'rdfs:label', 'Myself')
add(RDF.Uri('http://example.com/#me'), 'rdfs:label', RDF.Node('Myself'))
Returns: None
"""
if self.model is None:
logging.info("Failed to add triple to model because there was no current model.")
return
# - Converts strings to Nodes or Uris, whichever is appropriate
s = self.prepareTerm(s)
p = self.prepareTerm(p)
o = self.prepareTerm(o)
st = ""
try:
st = RDF.Statement(s, p, o)
except:
logging.info("Failed to create statement.")
try:
self.model.append(st)
except RDF.RedlandError:
logging.info("Failed to add statement: %s" % st)
return
def exists(self, s='?s', p='?p', o='?o'):
"""Determine whether any triples matching the given pattern exist in
the graph.
Parameters:
-----------
s : str
The subject of the triple pattern.
p : str
The predicate of the triple pattern.
o : str
The object of the triple pattern.
Returns:
--------
bool
Whether or not any triples with the pattern exist in the Graph.
"""
result = self.find(s=s, p=p, o=o, limit=1)
if result is None:
return False
if len(result) > 0 and 'error-message' in result[0]:
logging.error(result[0]['error-message'])
return False
if len(result) > 0:
return True
return False
def find(self, s='?s', p='?p', o='?o', limit=100):
"""Finds triples in the graph matching the given pattern.
Parameters:
-----------
s : RDF.Node | str
The subject of the triple pattern.
p : RDF.Node | str
The predicate of the triple pattern.
o : RDF.Node | str
The object of the triple pattern.
Returns:
--------
List
A list of Dicts with names s, p, and o.
"""
s = self.prepareTerm(s)
p = self.prepareTerm(p)
o = self.prepareTerm(o)
# checks if the payload contains a blank node or not
blank_node = self.tripleHasBlankNode(s, p, o)
if isinstance(s, RDF.Uri):
s = '<' + str(s) + '>'
if isinstance(p, RDF.Uri):
p = '<' + str(p) + '>'
if isinstance(o, RDF.Uri):
o = '<' + str(o) + '>'
query = u"""
SELECT * WHERE { %s %s %s } LIMIT %d
""" % (s, p, o, limit)
return self.graph.query(query, blank_node=blank_node)
def delete(self, s='?s', p='?p', o='?o'):
"""Delete all triples matching the given pattern from the graph.
Parameters:
-----------
s : str
The subject of the triple pattern.
p : str
The predicate of the triple pattern.
o : str
The object of the triple pattern.
Returns: dictionary object of the SPARQL response
"""
s = self.prepareTerm(s)
p = self.prepareTerm(p)
o = self.prepareTerm(o)
# checks if the payload contains a blank node or not
blank_node = self.tripleHasBlankNode(s, p, o)
if isinstance(s, RDF.Uri):
s = '<' + str(s) + '>'
if isinstance(p, RDF.Uri):
p = '<' + str(p) + '>'
if isinstance(o, RDF.Uri):
o = '<' + str(o) + '>'
payload_data = u"%s %s %s" % (s, p, o)
return self.graph.delete_data(payload=payload_data, blank_node=blank_node)
def datasetExists(self, identifier):
"""Determines whether a dataset exists in the graph.
The criterion used for existence is whether or not *any* triples with
the given identifier exist in the graph.
Parameters:
-----------
identifier : str
Non-urlencoded DataOne identifier
Returns:
--------
bool
Whether or not the dataset exists.
"""
identifier_esc = urllib.unquote(identifier).decode('utf8')
result = self.find(s='d1dataset:'+identifier_esc, limit=1)
if result is None or len(result) <= 0:
return False
else:
return True
def addDataset(self, identifier, doc=None):
"""Adds a dataset to the graph.
Parameters:
-----------
identifier : str
Non-urlencoded DataOne identifier
doc : XML Element
An XML element containing a result from the Solr index which
contains a number of fields relating to a dataset.
Returns: None
"""
if self.model is not None:
raise Exception("Model existed when addDataset was called. This means the last Model wasn't cleaned up after finishing.")
self.createModel()
# Get Solr fields if they weren't passed in
if doc is None:
doc = dataone.getSolrIndexFields(identifier)
identifier = dataone.extractDocumentIdentifier(doc)
identifier_esc = urllib.unquote(identifier).decode('utf8')
dataset_node = RDF.Uri(self.graph.ns['d1dataset'] + identifier_esc)
self.add(dataset_node, 'rdf:type', 'geolink:Dataset')
# Delete if dataset is already in graph
if self.datasetExists(identifier):
logging.info("Dataset with identifier %s already exists. Deleting then re-adding.", identifier)
self.deleteDataset(identifier)
scimeta = dataone.getScientificMetadata(identifier)
records = processing.extractCreators(identifier, scimeta)
vld = validator.Validator()
# Add Dataset triples first, we'll use them when we add people
# to match to existing people by the current dataset's 'obsoletes' field
self.addDatasetTriples(dataset_node, doc)
# Add people and organizations
people = [p for p in records if 'type' in p and p['type'] == 'person']
organizations = [o for o in records if 'type' in o and o['type'] == 'organization']
# Always do organizations first, so peoples' organization URIs exist
for organization in organizations:
organization = vld.validate(organization)
self.addOrganization(organization)
for person in people:
person = vld.validate(person)
self.addPerson(person)
# Commit or reject the model here
if self.model is None:
raise Exception("Model was None. It should have been an RDF.Model.")
self.insertModel()
self.model = None # Remove the model since we're done
return
def addDatasetTriples(self, dataset_node, doc):
"""Adds a dataset triples to the RDF model
Parameters:
-----------
dataset_node : str
The corresponding dataset node to be used to associate the properties in the model
doc : XML Element
An XML element containing a result from the Solr index which
contains a number of fields relating to a dataset.
Returns: None
"""
if self.model is None:
raise Exception("Model not found.")
identifier = dataone.extractDocumentIdentifier(doc)
identifier_esc = urllib.unquote(identifier).decode('utf8')
# type Dataset
self.add(dataset_node, 'rdf:type', 'geolink:Dataset')
# Title
title_element = doc.find("./str[@name='title']")
if title_element is not None:
self.add(dataset_node, 'rdfs:label', RDF.Node(title_element.text))
# Add geolink:Identifier
self.addIdentifierTriples(dataset_node, identifier)
# Abstract
abstract_element = doc.find("./str[@name='abstract']")
if abstract_element is not None:
self.add(dataset_node, 'geolink:description', RDF.Node(abstract_element.text))
# Spatial Coverage
bound_north = doc.find("./float[@name='northBoundCoord']")
bound_east = doc.find("./float[@name='eastBoundCoord']")
bound_south = doc.find("./float[@name='southBoundCoord']")
bound_west = doc.find("./float[@name='westBoundCoord']")
if all(ele is not None for ele in [bound_north, bound_east, bound_south, bound_west]):
if bound_north.text == bound_south.text and bound_west.text == bound_east.text:
wktliteral = "POINT (%s %s)" % (bound_north.text, bound_east.text)
else:
wktliteral = "POLYGON ((%s %s, %s %s, %s %s, %s, %s))" % (bound_west.text, bound_north.text, bound_east.text, bound_north.text, bound_east.text, bound_south.text, bound_west.text, bound_south.text)
self.add(dataset_node, 'geolink:hasGeometryAsWktLiteral', RDF.Node(wktliteral))
# Temporal Coverage
begin_date = doc.find("./date[@name='beginDate']")
end_date = doc.find("./date[@name='endDate']")
if begin_date is not None:
self.add(dataset_node, 'geolink:hasStartDate', RDF.Node(begin_date.text))
if end_date is not None:
self.add(dataset_node, 'geolink:hasEndDate', RDF.Node(end_date.text))
# Obsoletes as PROV#wasRevisionOf
obsoletes_node = doc.find("./str[@name='obsoletes']")
if obsoletes_node is not None:
other_document_esc = urllib.unquote(obsoletes_node.text).decode('utf8')
self.add(dataset_node, 'prov:wasRevisionOf', RDF.Uri(self.graph.ns['d1dataset'] + other_document_esc))
# Landing page
self.add(dataset_node, 'geolink:hasLandingPage', RDF.Uri("https://search.dataone.org/#view/" + identifier_esc))
# Digital Objects
# If this document has a resource map, get digital objects from there
# Otherwise, use the cito:documents field in Solr
resource_map_identifiers = doc.findall("./arr[@name='resourceMap']/str")
if len(resource_map_identifiers) > 0:
for resource_map_node in resource_map_identifiers:
resource_map_identifier = resource_map_node.text
digital_objects = dataone.getAggregatedIdentifiers(resource_map_identifier)
for digital_object in digital_objects:
digital_object_identifier = urllib.unquote(digital_object).decode('utf8')
self.addDigitalObject(identifier, digital_object_identifier)
else:
# If no resourceMap or documents field, at least add the metadata
# file as a digital object
# dataUrl e.g. https://cn.dataone.org/cn/v1/resolve/doi%3A10.6073%2FAA%2Fknb-lter-cdr.70061.123
data_url_node = doc.find("./str[@name='dataUrl']")
if data_url_node is not None:
data_url = data_url_node.text
digital_object = dataone.extractIdentifierFromFullURL(data_url)
digital_object = urllib.unquote(digital_object).decode('utf8')
self.addDigitalObject(identifier, digital_object)
return
def deleteDataset(self, identifier):
"""
Deletes the dataset from the graph
:param identifier: The identifier of the dataset to be deleted.
:return:
None
"""
self.deleteDatasetTriples(identifier)
return
def deleteDatasetTriples(self, identifier):
"""Delete all triples about this dataset. This includes:
- The dataset triples themselves (title, start date, etc)
- The dataset's digital objects
- The identifiers for the dataset and digital object(s)
- The isCreatorOf statement for people and organizations
This is a bit of extra work because identifiers and digital objects
are blank nodes and querying those takes some multi-statement SPARQL
queries.
"""
# Prepare some SPARQL query terms
identifier_esc = urllib.unquote(identifier).decode('utf8')
dataset = RDF.Uri(self.graph.ns['d1dataset']+identifier_esc)
has_identifier = RDF.Uri(self.graph.ns['geolink']+'hasIdentifier')
is_part_of = RDF.Uri(self.graph.ns['geolink']+'isPartOf')
has_part = RDF.Uri(self.graph.ns['geolink']+'hasPart')
"""Delete Dataset identifier
Find the blank node for the identifier of this dataset and delete
all statements about it.
"""
query = u"""DELETE WHERE
{
GRAPH <%s>
{
<%s> <%s> ?identifier .
?identifier ?s ?p
}
}
""" % (self.graph.name, dataset, has_identifier)
self.graph.update(query)
"""Delete Digital Object identifiers
Find all Digital Object (through Digital Object isPartOf) identifier
blank nodes and delete all statements about those blank nodes.
"""
query = u"""DELETE WHERE
{
GRAPH <%s>
{
?digital_object <%s> <%s> .
?digital_object <%s> ?identifier .
?identifier ?p ?o
}
}
""" % (self.graph.name, is_part_of, dataset, has_identifier)
self.graph.update(query)
"""Delete Digital Objects
Find all Digital Object blank nodes (through Dataset hasPart) and
delete statements about blank nodes.
"""
query = u"""DELETE WHERE
{
GRAPH <%s>
{
<%s> <%s> ?digital_object.
?digital_object ?p ?o
}
}
""" % (self.graph.name, dataset, has_part)
self.graph.update(query)
"""Delete statements about the dataset itself"""
self.delete('d1dataset:'+identifier_esc, '?p', '?o')
"""Delete respective isCreatorOf statements"""
self.delete('?s', 'geolink:isCreatorOf', '?o')
return
def addDigitalObject(self, dataset_identifier, digital_object_identifier):
"""
Generates and adds a Digital Object for the dataset to the Virtuoso database
:param dataset_identifier: String
Dataset PID
:param digital_object_identifier: String
Corresponding digital identifier for the PID
:return:
None
"""
try:
self.addDigitalObjectTriples(dataset_identifier, digital_object_identifier)
except Exception as e:
logging.error(e)
return
def addDigitalObjectTriples(self, dataset_identifier, digital_object_identifier):
"""
Generates a new node for the dataset and adds metadata triples associated to the digital Object.
:param dataset_identifier: String
Dataset PID
:param digital_object_identifier: String
Corresponding digital identifier for the PID
:return:
None
"""
if self.model is None:
raise Exception("Model not found.")
dataset_identifier_esc = urllib.unquote(dataset_identifier).decode('utf8')
do_node = RDF.Node(blank=str(uuid.uuid4()))
# Get data object meta
data_meta = dataone.getSystemMetadata(digital_object_identifier)
if data_meta is None:
raise Exception("System metadata for data object %s was not found. Continuing to next data object." % digital_object_identifier)
self.add(do_node, 'rdf:type', 'geolink:DigitalObject')
self.add(do_node, 'geolink:isPartOf', 'd1dataset:'+dataset_identifier_esc)
self.add('d1dataset:'+dataset_identifier_esc, 'geolink:hasPart', do_node)
self.addIdentifierTriples(do_node, digital_object_identifier)
# Checksum and checksum algorithm
checksum_node = data_meta.find(".//checksum")
if checksum_node is not None:
self.add(do_node, 'geolink:hasChecksum', RDF.Node(checksum_node.text))
self.add(do_node, 'geolink:hasChecksumAlgorithm', RDF.Node(checksum_node.get("algorithm")))
else:
raise Exception('Sysmeta XML for PID %s had no checksum element' % digital_object_identifier)
# Size
size_node = data_meta.find("./size")
if size_node is not None:
self.add(do_node, 'geolink:hasByteLength', RDF.Node(size_node.text))
else:
raise Exception('Sysmeta XML for PID %s had no size element' % digital_object_identifier)
# Format
format_id_node = data_meta.find("./formatId")
if format_id_node is not None:
if format_id_node.text in self.formats:
self.add(do_node, 'geolink:hasFormat', RDF.Uri(self.formats[format_id_node.text]['uri']))
else:
raise Exception('Format %s not found in list of known formats.' % format_id_node.text)
else:
raise Exception('Sysmeta XML for PID %s had no formatId element' % digital_object_identifier)
# Date uploaded
date_uploaded_node = data_meta.find("./dateUploaded")
if date_uploaded_node is not None:
self.add(do_node, 'geolink:dateUploaded', RDF.Node(date_uploaded_node.text))
else:
raise Exception('Sysmeta XML for PID %s had no dateUploaded element' % digital_object_identifier)
# Authoritative MN
authoritative_mn = data_meta.find("./authoritativeMemberNode")
if authoritative_mn is not None:
self.add(do_node, 'geolink:hasAuthoritativeDigitalGraph', 'd1node:' + authoritative_mn.text)
else:
raise Exception('Sysmeta XML for PID %s had no authoritativeMemberNode element' % digital_object_identifier)
# Replica MN's
replica_mns = data_meta.findall("./replica")
if not replica_mns:
raise Exception('Sysmeta XML for PID %s had no replica element' % digital_object_identifier)
for replica_mn in replica_mns:
replica_node = replica_mn.find("./replicaMemberNode")
if replica_node is not None:
self.add(do_node, 'geolink:hasReplicaDigitalGraph', 'd1node:' + replica_node.text)
else:
raise Exception('Sysmeta XML for PID %s had no replicaMemberNode element' % digital_object_identifier)
# Origin MN
origin_mn = data_meta.find("./originMemberNode")
if origin_mn is not None:
self.add(do_node, 'geolink:hasOriginDigitalgraph', 'd1node:' + origin_mn.text)
else:
raise Exception('Sysmeta XML for PID %s had no originMemberNode element' % digital_object_identifier)
# Obsoletes (mapped as PROV#wasRevisionOf)
obsoletes_node = data_meta.find("./obsoletes")
if obsoletes_node is not None:
other_document = urllib.unquote(obsoletes_node.text).decode('utf8')
self.add(do_node, 'prov:wasRevisionOf', 'd1dataset:'+other_document)
# Submitter and rights holders
# submitter_node = data_meta.find("./submitter")
#
# if submitter_node is not None:
# submitter_node_text = " ".join(re.findall(r"o=(\w+)", submitter_node.text, re.IGNORECASE))
#
# if len(submitter_node_text) > 0:
# self.insert('d1dataset:'+data_id, 'geolink:hasCreator', ])
# rights_holder_node = data_meta.find("./rightsHolder")
#
# if rights_holder_node is not None:
# rights_holder_node_text = " ".join(re.findall(r"o=(\w+)", rights_holder_node.text, re.IGNORECASE))
#
# if len(rights_holder_node_text) > 0:
# addStatement(model, d1dataset+data_id, self.graph.ns["geolink"]+"hasRightsHolder", RDF.Uri("urn:node:" + rights_holder_node_text.upper()))
return
def addPerson(self, record):
"""
Tries to find the person uri in the database. If found, it adds triples to the corresponding
person. Otherwise, it mints a new URI for the person identity and adds the information to the Dataset model.
:param record: Dict
Details related to the person
:return:
None
"""
if record is None:
return
logging.info("Calling findPersonURI on %s.", record)
person_uri = self.findPersonURI(record)
if person_uri is None:
person_uri = self.mintPersonPrefixedURIString()
logging.info("Person was not found. Minted URI of %s", person_uri)
self.addPersonTriples(person_uri, record)
def addPersonTriples(self, uri, record):
"""
Given a person URI, adds the corresponding information as triples
to the model to be inserted into the Virtuoso database.
:param uri: String
The person URI
:param record: Dict
Dictionary object of information related to the person
:return:
None
"""
if self.model is None:
raise Exception("Model not found.")
logging.info("Adding person triples for person with uri '%s' and record '%s'", uri, record)
self.add(uri, 'rdf:type', 'geolink:Person')
if 'salutation' in record:
self.add(uri, 'geolink:namePrefix', record['salutation'])
if 'full_name' in record:
self.add(uri, 'geolink:nameFull', record['full_name'])
if 'first_name' in record:
self.add(uri, 'geolink:nameGiven', record['first_name'])
if 'last_name' in record:
self.add(uri, 'geolink:nameFamily', record['last_name'])
if 'organization' in record:
organization_name = record['organization']
logging.info("Looking up organization with name '%s'", organization_name)
if self.organizationExists(organization_name):
logging.info("Organization with name '%s' exists.", organization_name)
organization_uri = self.findOrganizationURI({'name' : organization_name})
else:
organization_uri = self.mintOrganizationPrefixedURIString()
logging.info("Minted new organization URI of '%s' and adding triples.", organization_uri)
self.add(organization_uri, 'rdfs:label', organization_name)
self.add(uri, 'geolink:hasAffiliation', RDF.Uri(organization_uri))
if 'email' in record:
self.add(uri, 'foaf:mbox', RDF.Uri('mailto:' + record['email'].lower()))
if 'address' in record:
self.add(uri, 'geolink:address', record['address'])
if 'role' in record and 'document' in record:
if record['role'] == 'creator':
self.add(uri, 'geolink:isCreatorOf', 'd1dataset:' + urllib.unquote(record['document']).decode('utf8'))
elif record['role'] == 'contact':
self.add(uri, 'geolink:isContactOf', 'd1dataset:' + urllib.unquote(record['document']).decode('utf8'))
def addOrganization(self, record):
"""
Tries to find the organization uri in the database. If found, it adds triples to the corresponding
organization. Otherwise, it mints a new URI for the organization and adds the information related to the
organization
:param record: Dict
Details related to the organization
:return:
None
"""
if record is None:
return
logging.info("Calling findOrganizationURI on %s.", record)
organization_uri = self.findOrganizationURI(record)
if organization_uri is None:
organization_uri = self.mintOrganizationPrefixedURIString()
logging.info("Organization was not found. Minted URI of %s", organization_uri)
self.addOrganizationTriples(organization_uri, record)
def addOrganizationTriples(self, uri, record):
"""
Given an organization URI, it adds the corresponding information as triples and adds the triples
to the models to be inserted into the Virtuoso database.
:param uri: String
The organization URI
:param record: Dict
Dictionary object of information related to the organization
:return:
None
"""
if self.model is None:
raise Exception("Model not found.")
logging.info("Adding organization triples for organization with uri '%s' and record '%s'", uri, record)
self.add(uri, 'rdf:type', 'geolink:Organization')
if 'name' in record:
self.add(uri, 'rdfs:label', record['name'])
if 'email' in record:
self.add(uri, 'foaf:mbox', RDF.Uri('mailto:' + record['email'].lower()))
if 'address' in record:
self.add(uri, 'geolink:address', record['address'])
if 'role' in record and 'document' in record:
if record['role'] == 'creator':
self.add(uri, 'geolink:isCreatorOf', 'd1dataset:' + urllib.unquote(record['document']).decode('utf8'))
elif record['role'] == 'contact':
self.add(uri, 'geolink:isContactOf', 'd1dataset:' + urllib.unquote(record['document']).decode('utf8'))
def findPersonURI(self, record):
"""Find a person record in the graph according to a set of rules
for matching records.
A record is said to already exist in the graph if exactly one
person exists in the graph with the same non-zero-length last name and
email. This is the only rule used right now.
Arguments:
----------
record : Dict
A Dictionary of keys for the record ('last_name, 'email', etc.)
"""
if record is None:
return None
logging.info("Looking up person URI for %s", record)
# Match via last name and email
if 'last_name' in record and 'email' in record:
logging.info("Attempting to match %s via last name and email.", record)
last_name = record['last_name']
email = record['email']
if len(last_name) < 1 or len(email) < 1:
return None
query_string = u"""
SELECT ?s
WHERE {
?s rdf:type geolink:Person .
?s geolink:nameFamily '''%s''' .
?s foaf:mbox <mailto:%s>
}
""" % (last_name,
email.lower())
find_result = self.graph.query(query_string)
if find_result is None or len(find_result) != 1:
logging.info("No match found.")
return None
# Remove < and > around string
person_uri_string = find_result[0]['s']
person_uri_string = person_uri_string.replace('<', '')
person_uri_string = person_uri_string.replace('>', '')
# Make an RDF.Uri
person_uri = RDF.Uri(person_uri_string)
logging.info("Match found %s", person_uri)
return person_uri
# Search for existing records that are creators of documents obsoleted
# by the current one. To do this we query the current model (if it
# exists) for a prov:wasRevisionOf statement.
if 'last_name' in record and 'document' in record and self.model is not None:
logging.info("Attempting to match %s via last name and wasRevisionOf.", record)
query = RDF.Statement(subject = RDF.Uri(self.graph.ns['d1dataset']+urllib.unquote(record['document']).decode('utf8')),
predicate = RDF.Uri(self.graph.ns['prov']+'wasRevisionOf'))
revised_documents = []
for st in self.model.find_statements(query):
# Only add unique datasets because we end up adding multiple
# revision statements for the metadata and the digital object
# Convert the object to a str and remove its namespace prefix
# first
object_string = str(st.object).replace(self.graph.ns['d1dataset'], '')
if object_string not in revised_documents:
revised_documents.append(object_string)
if len(revised_documents) != 1:
return None
last_name = RDF.Node(record['last_name'])
revised_document = RDF.Uri(self.graph.ns['d1dataset'] + revised_documents[0])
# Query
query_string = u"""select ?person
where {
?person rdf:type geolink:Person .
?person geolink:nameFamily '''%s''' .
?person geolink:isCreatorOf <%s> .
}""" % (last_name, revised_document)
logging.info("Looking up person with query '%s'.", query_string)
result = self.graph.query(query_string)
# Use the person if we find exactly one match
if len(result) == 1 and 'person' in result[0]:
logging.info("Person match found.")
result_string = result[0]['person']
person_uuid_search = re.search(r"<%s(.*)>" % self.graph.ns['d1person'], result_string)
if person_uuid_search is None:
logging.error("Failed to extract UUID string from result.")
return None
person_uuid = person_uuid_search.group(1)
return RDF.Uri(self.graph.ns['d1person']+person_uuid)
return None
def organizationExists(self, organizationName):
"""Checks if the organization already exists in the graph or not.
It uses findOrganizationURI method to check if the
organization URI is in the system or not
Arguments:
----------
record : Dict
A Dictionary of keys for the record ('name')
:return: boolean
A boolean value representing the existence of organization in the graph
"""
if len(organizationName) < 1:
return False
record = {}
record["name"] = organizationName
return self.findOrganizationURI(record) is not None
def findOrganizationURI(self, record):
"""
Find an organization record in the graph according to a set of
rules for matching records.
A record is said to already exist in the graph if exactly one
organization in the graph has the same non-zero-length name. This is
the only rule used right now.
:param record: Dict
A Dictionary of keys for the record ('name', etc.)
:return: organization_uri: String
Organization URI String
"""
if record is None:
return None
if 'name' in record:
name = record['name']
if len(name) < 1:
return None
query_string = u"""
SELECT ?s
WHERE {
?s rdf:type geolink:Organization .
?s rdfs:label '''%s'''
}
""" % name
logging.info("Looking up organization with query %s", query_string)
find_result = self.graph.query(query_string)
if find_result is None or len(find_result) != 1:
logging.info("Organization not found by name.")
return None
# Remove < and > around string
organization_uri_string = find_result[0]['s']
organization_uri_string = organization_uri_string.replace('<', '')
organization_uri_string = organization_uri_string.replace('>', '')
# Make an RDF.Uri
organization_uri = RDF.Uri(organization_uri_string)
logging.info("Found organiztion match for organization URI %s.", organization_uri)
return organization_uri
return None
def mintPersonPrefixedURIString(self):
"""
Generates a new URI for the Person
:return:
String:
generated uri_string to the calling function
"""
new_uuid = str(uuid.uuid4())
uri_string = "d1person:urn:uuid:%s" % new_uuid
return uri_string
def mintOrganizationPrefixedURIString(self):
"""
Generates a new URI for the Organization
:return:
String:
generated uri_string to the calling function
"""
new_uuid = str(uuid.uuid4())
uri_string = "d1org:urn:uuid:%s" % new_uuid
return uri_string
def addIdentifierTriples(self, node, identifier):
"""
Add triples for the given identifier to the given node.
:param node:
:param identifier:
:return:
None
"""
if self.model is None:
raise Exception("Model not found.")
scheme = util.getIdentifierScheme(identifier)
resolve_url = util.getIdentifierResolveURL(identifier)
# Create a blank node for the identifier
identifier_node = RDF.Node(blank=str(uuid.uuid4()))
self.add(node, 'geolink:hasIdentifier', identifier_node)
self.add(identifier_node, 'rdf:type', 'geolink:Identifier')
self.add(identifier_node, 'rdfs:label', RDF.Node(identifier))
self.add(identifier_node, 'geolink:hasIdentifierValue', RDF.Node(identifier))
self.add(identifier_node, 'geolink:hasIdentifierScheme', 'datacite:'+scheme)
if resolve_url is not None:
self.add(identifier_node, 'geolink:hasIdentifierResolveURL', RDF.Uri(resolve_url))
# Also always add the DataOne resolve URL for non local-resource-identifier-scheme identifiers
if scheme != 'local-resource-identifier-scheme':
dataone_resolve_url = 'https://cn.dataone.org/cn/v1/resolve/%s' % urllib.unquote(identifier).decode('utf8')
self.add(identifier_node, 'geolink:hasIdentifierResolveURL', RDF.Uri(dataone_resolve_url))
return
def tripleHasBlankNode(self, s, p, o):
"""
Checks if any of the s / p / o from the triple is a blank node or not
s : RDF.Node | str
The subject of the triple pattern.
p : RDF.Node | str
The predicate of the triple pattern.
o : RDF.Node | str
The object of the triple pattern.
:return:
A boolean value indicating whether either of the s / p / o is a blank node or not
"""
# Check for blank nodes:
if isinstance(s, RDF.Node):
if s.is_blank():
return True
elif isinstance(s, str):
if s.startswith("_:"):
return True
if isinstance(p, RDF.Node):
if p.is_blank():
return True
elif isinstance(p, str):
if p.startswith("_:"):
return True
if isinstance(o, RDF.Node):
if o.is_blank():
return True
elif isinstance(o, str):
if o.startswith("_:"):
return True
return False
|
from netapp.netapp_object import NetAppObject
class PriorityEntryInfo(NetAppObject):
"""
A priority associated to the specified priority group.
"""
_priority = None
@property
def priority(self):
"""
The priority associated with the specified priority group.
Range: [0..7]
"""
return self._priority
@priority.setter
def priority(self, val):
if val != None:
self.validate('priority', val)
self._priority = val
@staticmethod
def get_api_name():
return "priority-entry-info"
@staticmethod
def get_desired_attrs():
return [
'priority',
]
def describe_properties(self):
return {
'priority': { 'class': int, 'is_list': False, 'required': 'required' },
}
|
import sys
import codecs
def crlf2lf(path, encoding='utf8'):
with codecs.open(path, 'r', encoding=encoding) as fs:
data = fs.read()
with codecs.open(path, 'w', encoding=encoding) as fs:
data = data.replace('\r', '')
fs.write(data)
def lf2crlf(path, encoding='utf8'):
with codecs.open(path, 'r', encoding=encoding) as fs:
data = fs.read()
with codecs.open(path, 'w', encoding=encoding) as fs:
data = data.replace('\r', '')
data = data.replace('\n', '\r\n')
fs.write(data)
def main():
assert len(sys.argv) >= 3, 'usage: <script> {lf|crlf} <path> [encoding]'
_newline = sys.argv[1]
_path = sys.argv[2]
_encoding = sys.argv[3] if len(sys.argv) > 3 else 'utf8'
if _newline == 'lf':
crlf2lf(_path, _encoding)
elif _newline == 'crlf':
lf2crlf(_path, _encoding)
else:
raise ValueError("newline must be 'lf' or 'crlf'")
if __name__ == '__main__':
main()
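# Example invocations (the script name is hypothetical):
#
#     python newline.py lf notes.txt
#     python newline.py crlf notes.txt cp1251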
|
from functools import partial
import copy
import pylab
import multiprocessing as mp
from ea import adult_selection
from ea import parent_selection
from ea import reproduction
from ea import float_gtype
from ea import main
from ea.ea_globals import *
import ctrnn
import min_cog_game
def fitness_thread(agent, game):
'''A single fitness testing thread'''
return game.play(agent, False)
def fitness_test_mp(population, game):
'''Multiprocessing fitness test'''
games = [copy.deepcopy(game) for i in xrange(len(population))]
pool = mp.Pool(mp.cpu_count())
indices = []
workers = []
for i, ind in enumerate(population):
indices += [i]
workers += [pool.apply_async(fitness_thread, [ind.ptype, games[i]])]
for i, worker in enumerate(workers):
score = worker.get()
if population[indices[i]].fitness:
population[indices[i]].fitness = (population[indices[i]].fitness + score)/2.
else:
population[indices[i]].fitness = max(0, score)
pool.close()
pool.join()
return population
def fitness_test(population, visual):
'''Play a game with each individual in the population and assign fitness based on score'''
game = min_cog_game.Game()
if visual:
for ind in population:
score = game.play(ind.ptype, visual)
ind.fitness = max(0, score)
return population
else:
return fitness_test_mp(population, game)
def gen_fitness():
'''Generate the fitness function interactively'''
while True:
visual = raw_input("Do you want gameplay visualisations? (y/n):\n")
if visual == 'y' or visual == 'Y':
return partial(fitness_test, visual=True)
elif visual == 'n' or visual == 'N':
return partial(fitness_test, visual=False)
else:
print "Please type y or n."
def develop(population, num_input, num_hidden, num_output):
'''Create CTRNN objects from float lists.'''
num_weights = num_hidden*(num_input + num_hidden) + num_output*(num_hidden + num_output)
num_biases = num_hidden + num_output
num_gains = num_hidden + num_output
num_taus = num_hidden + num_output
for ind in population:
i = 0
weight_list = ind.gtype[i:i+num_weights]
i += num_weights
bias_list = ind.gtype[i:i+num_biases]
i += num_biases
gain_list = ind.gtype[i:i+num_gains]
i += num_gains
tau_list = ind.gtype[i:i+num_taus]
ind.ptype = ctrnn.CTRNN(num_input, num_hidden, num_output,
weight_list, bias_list, gain_list, tau_list)
return population
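# For the network used in the driver below (num_input=5, num_hidden=2,
# num_output=2) the genome slices work out to:
#   weights: 2*(5+2) + 2*(2+2) = 22 values
#   biases:  2+2 = 4 values
#   gains:   2+2 = 4 values
#   taus:    2+2 = 4 values
# so each individual's gtype is a flat list of 34 floats consumed in that order.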
def visualize(generation_list):
'''Generate pretty pictures using pylab and pygame'''
best = []
average = []
stddev = []
average_plus_stddev = []
average_minus_stddev = []
for pop in generation_list:
best += [most_fit(pop).fitness]
average += [avg_fitness(pop)]
stddev += [fitness_stddev(pop)]
average_plus_stddev += [average[-1] + stddev[-1]]
average_minus_stddev += [average[-1] - stddev[-1]]
pylab.figure(1)
pylab.fill_between(range(len(generation_list)), average_plus_stddev, average_minus_stddev, alpha=0.2, color='b', label="Standard deviation")
pylab.plot(range(len(generation_list)), best, color='r', label='Best')
pylab.plot(range(len(generation_list)), average, color='b', label='Average with std.dev.')
pylab.title("Fitness plot - Beer-cog")
pylab.xlabel("Generation")
pylab.ylabel("Fitness")
pylab.legend(loc="upper left")
pylab.savefig("mincog_fitness.png")
best_index = best.index(max(best))
best_individual = most_fit(generation_list[-1])
with open('last.txt','w') as f:
f.write(str(best_individual.gtype))
print best_individual.gtype
game = min_cog_game.Game()
game.play(best_individual.ptype, True)
if __name__ == "__main__":
popsize = int(raw_input("Input population size:\n"))
adult_selector, litter_size = adult_selection.gen_adult_selection(popsize)
parent_selector = parent_selection.gen_parent_selection(litter_size)
num_input = 5
num_hidden = 2
num_output = 2
num_weights = num_hidden*(num_input + num_hidden) + num_output*(num_hidden + num_output)
num_biases = num_hidden + num_output
num_gains = num_hidden + num_output
num_taus = num_hidden + num_output
ranges = []
ranges += [(-5.0, 5.0)]*num_weights
ranges += [(-10.0, 0.0)]*num_biases
ranges += [(1.0, 5.0)]*num_gains
ranges += [(1.0, 2.0)]*num_taus
mutate = float_gtype.gen_mutate(ranges)
crossover = float_gtype.gen_crossover()
reproducer = reproduction.gen_reproduction(mutate, crossover)
generations = int(raw_input("Input max number of generations:\n"))
fitness_goal = float(raw_input("Input fitness goal, 0 for none:\n"))
development = partial(develop, num_input=num_input, num_hidden=num_hidden, num_output=num_output)
fitness_tester = gen_fitness()
initial = [individual(gtype=float_gtype.generate(ranges), age=0) for i in xrange(popsize)]
generation_list = main.evolutionary_algorithm(initial, development, fitness_tester, adult_selector, parent_selector, reproducer, generations, fitness_goal)
visualize(generation_list)
|
"""
Let 1 represent ‘A’, 2 represents ‘B’, etc. Given a digit sequence, count the number of possible decodings of the given digit sequence.
Examples:
Input: digits[] = "121"
Output: 3
// The possible decodings are "ABA", "AU", "LA"
Input: digits[] = "1234"
Output: 3
// The possible decodings are "ABCD", "LCD", "AWD"
"""
def count_decoding_DP(digits, n):
# A table to store results of subproblems
count = [0] * (n+1)
count[0] = 1
count[1] = 1
for i in range(2, n+1):
count[i] = 0
# If the last digit is not 0,
# then last digit must add to
# the number of words
if (digits[i-1] > '0'):
count[i] = count[i-1]
# If second last digit is smaller
# than 2 and last digit is
# smaller than 7, then last two
# digits form a valid character
if (digits[i-2] == '1' or (digits[i-2] == '2' and digits[i-1] < '7') ):
count[i] += count[i-2]
return count[n]
# Driver program to test above function
digits = ['1','2','3','4']
n = len(digits)
print("Count is ",count_decoding_DP(digits, n))
|
"""pymoku example: Basic Laser Lock Box
This example demonstrates how you can configure the laser lock box
instrument
(c) 2019 Liquid Instruments Pty. Ltd.
"""
from pymoku import Moku
from pymoku.instruments import LaserLockBox
from scipy import signal
def gen_butterworth(corner_frequency):
"""
Generate coefficients for a second order butterworth low-pass filter.
Corner frequencies for laser lock box second harmonic filtering should be
in the range: 1 kHz < corner frequency < 31.25 MHz.
"""
sample_rate = 31.25e6
normalised_corner = corner_frequency / (sample_rate / 2)
b, a = signal.butter(2, normalised_corner, 'low', analog=False)
coefficient_array = [[1.0, b[0], b[1], b[2], -a[1], -a[2]],
[1.0, 1.0, 0.0, 0.0, 0.0, 0.0]]
return coefficient_array
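# For the 1e4 Hz (10 kHz) corner used below, the normalised corner frequency is
# 1e4 / (31.25e6 / 2) = 6.4e-4, well inside the 1 kHz - 31.25 MHz range noted
# in the docstring.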
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('Moku')
try:
i = m.deploy_or_connect(LaserLockBox)
# set local oscillator, auxiliary and scan generators
i.set_local_oscillator(source='internal', frequency=0, phase=90,
pll_auto_acq=False)
i.set_aux_sine(amplitude=1.0, frequency=10e3, phase=0, sync_to_lo=False,
output='out1')
i.set_scan(frequency=1e3, phase=0, output='out2', amplitude=1.0,
waveform='triangle')
# configure PIDs:
i.set_pid_by_gain(1, g=1, kp=1)
i.set_pid_by_gain(2, g=1, kp=1)
# configure second harmonic rejection low pass filter
coef_array = gen_butterworth(1e4)
i.set_custom_filter(coef_array)
finally:
# Close the connection to the Moku device
# This ensures network resources are released correctly
m.close()
|
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
RULE_DICT = {
'max': np.max,
'min': np.min,
'avg': np.mean,
'prd': np.prod
}
class AggregationEnsemble(BaseEstimator, ClassifierMixin):
def __init__(self, estimators, rule):
self.estimators = estimators
self.rule = rule
def fit(self, X, y):
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
self.classifiers_pool_ = self.estimators
assert self.rule in RULE_DICT
return self
def predict_proba(self, X):
check_is_fitted(self, ['classes_'])
X = check_array(X)
predictions = []
for est in self.classifiers_pool_:
predictions.append(est.predict_proba(X))
predictions = np.array(predictions)
return RULE_DICT[self.rule](predictions, axis=0)
def predict(self, X):
return self.classes_.take(np.argmax(self.predict_proba(X), axis=1))
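# A minimal usage sketch. The base estimators are assumed to be fitted already,
# since fit() above only stores them and validates the rule:
#
#     from sklearn.datasets import load_iris
#     from sklearn.naive_bayes import GaussianNB
#     from sklearn.tree import DecisionTreeClassifier
#
#     X, y = load_iris(return_X_y=True)
#     pool = [DecisionTreeClassifier().fit(X, y), GaussianNB().fit(X, y)]
#     ensemble = AggregationEnsemble(estimators=pool, rule='avg').fit(X, y)
#     print(ensemble.predict(X[:5]))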
|
import sys
import os
import dotenv
try:
KEYS = ('PYIGL_PATH', 'SDF_PATH')
dotenv.load_dotenv(verbose=True)
for key in KEYS:
_path = os.environ.get(key, '')
_path = os.path.abspath(os.path.normpath(_path))
if os.path.exists(_path):
sys.path.insert(0, os.path.abspath(_path))
try:
import igl
except ImportError:
#print("[pyigl_import] module igl not found. trying to import pyigl")
import pyigl as igl
except ImportError as e:
raise e
|
# Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from networking_bagpipe.agent import bagpipe_bgp_agent
from networking_bagpipe.driver.type_route_target import TYPE_ROUTE_TARGET
from neutron.agent.l2 import agent_extension
from neutron.common import config as common_config
from neutron_lib import constants as n_const
from neutron_lib.utils import helpers
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent \
import LinuxBridgeManager
LOG = logging.getLogger(__name__)
LB_BAGPIPE_AGENT_BINARY = 'neutron-bagpipe-linuxbridge-agent'
class LinuxBridgeManagerBaGPipe(LinuxBridgeManager):
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == TYPE_ROUTE_TARGET:
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
return (super(LinuxBridgeManagerBaGPipe, self)
.ensure_physical_in_bridge(network_id,
network_type,
physical_network,
segmentation_id))
class BagpipeAgentExtension(agent_extension.AgentCoreResourceExtension):
def initialize(self, connection, driver_type):
# Create an HTTP client for BaGPipe BGP component REST service
self.bagpipe_bgp_agent = bagpipe_bgp_agent.BaGPipeBGPAgent(
n_const.AGENT_TYPE_LINUXBRIDGE,
connection)
def handle_port(self, context, data):
pass
def delete_port(self, context, data):
pass
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
try:
interface_mappings = helpers.parse_mappings(
cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
except ValueError as e:
LOG.error("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!", e)
sys.exit(1)
LOG.info("Interface mappings: %s", interface_mappings)
try:
bridge_mappings = helpers.parse_mappings(
cfg.CONF.LINUX_BRIDGE.bridge_mappings)
except ValueError as e:
LOG.error("Parsing bridge_mappings failed: %s. "
"Agent terminated!", e)
sys.exit(1)
LOG.info("Bridge mappings: %s", bridge_mappings)
manager = LinuxBridgeManagerBaGPipe(bridge_mappings, interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout,
n_const.AGENT_TYPE_LINUXBRIDGE,
LB_BAGPIPE_AGENT_BINARY)
LOG.info("Agent initialized successfully, now running... ")
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
|
#!/usr/bin/env python3
import numpy as np
import h5py
import argparse
from pathlib import Path
def main():
parser = argparse.ArgumentParser(description="Convert HDF5 to CSV")
parser.add_argument("input", type=Path, help="an HDF5 file")
args = parser.parse_args()
print(f"Reading from HDF5 file: {args.input}")
with h5py.File(args.input, "r") as f:
for name, dset in f.items():
path = args.input.with_name(f"{args.input.stem}_{name}.csv")
print(f"Writing dataset \"{name}\" to CSV file: {path}")
np.savetxt(path, dset, fmt="%0.6f", delimiter=",")
if __name__ == "__main__":
main()
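# Example invocation (the script name is hypothetical); each dataset in data.h5
# is written next to the input as data_<dataset-name>.csv:
#
#     python hdf5_to_csv.py data.h5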
|
from django.views import generic
from .models import Message
from .forms import MessageCreateForm
class MessageCreate(generic.CreateView):
model = Message
form_class = MessageCreateForm
def get_context_data(self, **kwargs):
context = super(MessageCreate, self).get_context_data(**kwargs)
most_viewed_messages = Message.objects.order_by('-views')[:5]
context['most_viewed_messages'] = most_viewed_messages
return context
class MessageDetailView(generic.DetailView):
model = Message
context_object_name = 'message'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
# Increase value of views attribute in the message instance
self.object.views += 1
self.object.save()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
|
from nubes.connectors.openstack import connector as os_connector
# NOTE(blogan): we can probably do a registration of each class that gets
# inherited from via metaclass so we don't have to build this manually. This
# works for now though.
connectors = {os_connector.OpenstackConnector.name():
os_connector.OpenstackConnector}
class Dispatcher(object):
def __init__(self, connector_name, auth_url, username, password,
project_name):
self.connector = connectors.get(connector_name)(auth_url, username,
password, project_name)
def create_server(self, name, image, flavor, networks):
return self.connector.create_server(name, image, flavor, networks)
def list_servers(self):
return self.connector.list_servers()
def delete_server(self, uuid):
self.connector.delete_server(uuid)
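# A sketch of the registration idea from the NOTE above (class names are
# hypothetical; the real connector base class may differ). A registering
# metaclass lets every concrete connector subclass add itself to the mapping:
#
#     connectors = {}
#
#     class ConnectorMeta(type):
#         def __init__(cls, clsname, bases, attrs):
#             super(ConnectorMeta, cls).__init__(clsname, bases, attrs)
#             # register concrete subclasses only, not the abstract base
#             if any(isinstance(base, ConnectorMeta) for base in bases):
#                 connectors[cls.name()] = cls
#
#     class BaseConnector(object):
#         __metaclass__ = ConnectorMeta  # Python 2 style metaclass declaration
#
#         @classmethod
#         def name(cls):
#             raise NotImplementedError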
|
from __future__ import print_function
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import textwrap
def downloadFromURL(uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, loadFileTypes=None, loadFileProperties={}):
"""Download and optionally load data into the application.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param customDownloader: Custom function for downloading.
:param loadFileTypes: file format name(s) ('VolumeFile' by default).
:param loadFileProperties: custom properties passed to the IO plugin.
If the given ``fileNames`` are not found in the application cache directory, they
are downloaded using the associated URIs.
See ``slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()``
If not explicitly provided or if set to ``None``, the ``loadFileTypes`` are
guessed based on the corresponding filename extensions.
If a given fileName has the ``.mrb`` or ``.mrml`` extension, it will **not** be loaded
by default. To ensure the file is loaded, ``loadFiles`` must be set.
The ``loadFileProperties`` are common for all files. If different properties
need to be associated with files of different types, downloadFromURL must
be called for each.
"""
return SampleDataLogic().downloadFromURL(
uris, fileNames, nodeNames, loadFiles, customDownloader, loadFileTypes, loadFileProperties)
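# For example, a single remote volume could be fetched and loaded with a call
# like the following (URL and names are placeholders, not a real sample):
#
#     downloadFromURL(
#         uris='https://example.org/data/WEIN069.fits',
#         fileNames='WEIN069.fits',
#         nodeNames='WEIN069')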
def downloadSample(sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the first loaded node."""
return SampleDataLogic().downloadSamples(sampleName)[0]
def downloadSamples(sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the loaded nodes."""
return SampleDataLogic().downloadSamples(sampleName)
#
# AstroSampleData
#
class AstroSampleData(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Astro Sample Data"
self.parent.categories = ["Astronomy"]
self.parent.dependencies = ["AstroVolume"]
self.parent.contributors = ["""
Davide Punzo (Kapteyn Astronomical Institute),
Thijs van der Hulst (Kapteyn Astronomical Institute) and
Jos Roerdink (Johann Bernoulli Institute)."""]
self.parent.helpText = """
The AstroSampleData module can be used to download data for working with in SlicerAstro.
Use of this module requires an active network connection."""
self.parent.acknowledgementText = """
This module was developed by Davide Punzo. <br>
This work was supported by ERC grant nr. 291531 and the Slicer Community. <br><br>
Data acknowledgement: <br>
WEIN069: Mpati Ramatsoku and Marc Verheijen (Kapteyn Astronomical Institute); <br>
WEIN069_MASK: mask generated using SoFiA (https://github.com/SoFiA-Admin/SoFiA); <br>
NGC2403: THING survey; <br>
NGC2403_DSS: optical image from DSS; <br>
NGC3379 and NGC4111: ATLAS3D survey. <br>
This file has been originally edited by Steve Pieper.
"""
self.parent.icon = qt.QIcon(':Icons/XLarge/NGC2841.png')
self.parent = parent
if slicer.mrmlScene.GetTagByClassName( "vtkMRMLScriptedModuleNode" ) != 'ScriptedModule':
slicer.mrmlScene.RegisterNodeClass(vtkMRMLScriptedModuleNode())
# Trigger the menu to be added when application has started up
if not slicer.app.commandOptions().noMainWindow :
slicer.app.connect("startupCompleted()", self.addMenu)
# allow other modules to register sample data sources by appending
# instances or subclasses SampleDataSource objects on this list
try:
slicer.modules.sampleDataSources
except AttributeError:
slicer.modules.sampleDataSources = {}
def addMenu(self):
actionIcon = self.parent.icon
a = qt.QAction(actionIcon, 'Download Sample Data', slicer.util.mainWindow())
a.setToolTip('Go to the SampleData module to download data from the network')
a.connect('triggered()', self.select)
fileMenu = slicer.util.lookupTopLevelWidget('FileMenu')
if fileMenu:
for action in fileMenu.actions():
if action.text == 'Save':
fileMenu.insertAction(action,a)
def select(self):
m = slicer.util.mainWindow()
m.moduleSelector().selectModule('AstroSampleData')
#
# AstroSampleDataSource
#
class AstroSampleDataSource:
"""
Describe a set of sample data associated with one or multiple URIs and filenames.
"""
def __init__(self, sampleName=None, sampleDescription=None, uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, thumbnailFileName=None,
loadFileType=None, loadFileProperties={}):
"""
:param sampleName: Name identifying the data set.
:param sampleDescription: Displayed name of data set in SampleData module GUI. (default is ``sampleName``)
:param thumbnailFileName: Displayed thumbnail of data set in SampleData module GUI,
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded.
:param customDownloader: Custom function for downloading.
:param loadFileType: file format name(s) ('VolumeFile' by default if node name is specified).
:param loadFileProperties: custom properties passed to the IO plugin.
"""
self.sampleName = sampleName
if sampleDescription is None:
sampleDescription = sampleName
self.sampleDescription = sampleDescription
if (isinstance(uris, list) or isinstance(uris, tuple)):
if isinstance(loadFileType, str) or loadFileType is None:
loadFileType = [loadFileType] * len(uris)
if nodeNames is None:
nodeNames = [None] * len(uris)
if loadFiles is None:
loadFiles = [None] * len(uris)
elif isinstance(uris, str):
uris = [uris,]
fileNames = [fileNames,]
nodeNames = [nodeNames,]
loadFiles = [loadFiles,]
loadFileType = [loadFileType,]
updatedFileType = []
for fileName, nodeName, fileType in zip(fileNames, nodeNames, loadFileType):
# If not explicitly specified, attempt to guess fileType
if fileType is None:
if nodeName is not None:
# TODO: Use method from Slicer IO logic ?
fileType = "VolumeFile"
else:
ext = os.path.splitext(fileName.lower())[1]
if ext in [".mrml", ".mrb"]:
fileType = "SceneFile"
elif ext in [".zip"]:
fileType = "ZipFile"
updatedFileType.append(fileType)
self.uris = uris
self.fileNames = fileNames
self.nodeNames = nodeNames
self.loadFiles = loadFiles
self.customDownloader = customDownloader
self.thumbnailFileName = thumbnailFileName
self.loadFileType = updatedFileType
self.loadFileProperties = loadFileProperties
if not len(uris) == len(fileNames) == len(nodeNames) == len(loadFiles) == len(updatedFileType):
raise Exception("All fields of sample data source must have the same length")
def __str__(self):
output = [
"sampleName : %s" % self.sampleName,
"sampleDescription : %s" % self.sampleDescription,
"thumbnailFileName : %s" % self.thumbnailFileName,
"loadFileProperties: %s" % self.loadFileProperties,
"customDownloader : %s" % self.customDownloader,
""
]
for fileName, uri, nodeName, loadFile, fileType in zip(self.fileNames, self.uris, self.nodeNames, self.loadFiles, self.loadFileType):
output.extend([
"fileName : %s" % fileName,
"uri : %s" % uri,
"nodeName : %s" % nodeName,
"loadFile : %s" % loadFile,
"loadFileType: %s" % fileType,
""
])
return "\n".join(output)
#
# SampleData widget
#
class AstroSampleDataWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# This module is often used in developer mode, therefore
# collapse reload & test section by default.
if hasattr(self, "reloadCollapsibleButton"):
self.reloadCollapsibleButton.collapsed = True
self.observerTags = []
self.logic = AstroSampleDataLogic(self.logMessage)
numberOfColumns = 3
iconPath = os.path.join(os.path.dirname(__file__).replace('\\','/'), 'Resources','Icons')
desktop = qt.QDesktopWidget()
mainScreenSize = desktop.availableGeometry(desktop.primaryScreen)
iconSize = qt.QSize(mainScreenSize.width()/15,mainScreenSize.height()/10)
categories = sorted(slicer.modules.sampleDataSources.keys())
if self.logic.builtInCategoryName in categories:
categories.remove(self.logic.builtInCategoryName)
if 'AstronomicalData' in categories:
categories.remove('AstronomicalData')
categories.insert(0, 'AstronomicalData')
for category in categories:
if category == self.logic.developmentCategoryName and self.developerMode is False:
continue
frame = ctk.ctkCollapsibleGroupBox(self.parent)
self.layout.addWidget(frame)
frame.title = category
frame.name = '%sCollapsibleGroupBox' % category
layout = qt.QGridLayout(frame)
columnIndex = 0
rowIndex = 0
for source in slicer.modules.sampleDataSources[category]:
name = source.sampleDescription
if not name:
name = source.nodeNames[0]
b = qt.QToolButton()
b.setText(name)
# Set thumbnail
if source.thumbnailFileName:
# Thumbnail provided
thumbnailImage = source.thumbnailFileName
else:
# Look for thumbnail image with the name of any node name with .png extension
thumbnailImage = None
for nodeName in source.nodeNames:
if not nodeName:
continue
thumbnailImageAttempt = os.path.join(iconPath, nodeName+'.png')
if os.path.exists(thumbnailImageAttempt):
thumbnailImage = thumbnailImageAttempt
break
if thumbnailImage and os.path.exists(thumbnailImage):
b.setIcon(qt.QIcon(thumbnailImage))
b.setIconSize(iconSize)
b.setToolButtonStyle(qt.Qt.ToolButtonTextUnderIcon)
qSize = qt.QSizePolicy()
qSize.setHorizontalPolicy(qt.QSizePolicy.Expanding)
b.setSizePolicy(qSize)
b.name = '%sPushButton' % name
layout.addWidget(b, rowIndex, columnIndex)
columnIndex += 1
if columnIndex==numberOfColumns:
rowIndex += 1
columnIndex = 0
if source.customDownloader:
b.connect('clicked()', source.customDownloader)
else:
b.connect('clicked()', lambda s=source: self.logic.downloadFromSource(s))
self.log = qt.QTextEdit()
self.log.readOnly = True
self.layout.addWidget(self.log)
self.logMessage('<p>Status: <i>Idle</i>')
# Add spacer to layout
self.layout.addStretch(1)
def logMessage(self, message, logLevel=logging.INFO):
# Set text color based on log level
if logLevel >= logging.ERROR:
message = '<font color="red">' + message + '</font>'
elif logLevel >= logging.WARNING:
message = '<font color="orange">' + message + '</font>'
# Show message in status bar
doc = qt.QTextDocument()
doc.setHtml(message)
slicer.util.showStatusMessage(doc.toPlainText(),3000)
# Show message in log window at the bottom of the module widget
self.log.insertHtml(message)
self.log.insertPlainText('\n')
self.log.ensureCursorVisible()
self.log.repaint()
logging.log(logLevel, message)
slicer.app.processEvents(qt.QEventLoop.ExcludeUserInputEvents)
#
# SampleData logic
#
class AstroSampleDataLogic:
"""Manage the slicer.modules.sampleDataSources dictionary.
The dictionary keys are categories of sample data sources.
The BuiltIn category is managed here. Modules or extensions can
register their own sample data by creating instances of the
AstroSampleDataSource class. These instances should be stored in a
list that is assigned to a category following the model
used in registerBuiltInSampleDataSources below.
"""
@staticmethod
def registerCustomSampleDataSource(category='Custom',
sampleName=None, uris=None, fileNames=None, nodeNames=None,
customDownloader=None, thumbnailFileName=None,
loadFileType='VolumeFile', loadFiles=None, loadFileProperties={}):
"""Adds custom data sets to SampleData.
:param category: Section title of data set in SampleData module GUI.
:param sampleName: Displayed name of data set in SampleData module GUI.
:param thumbnailFileName: Displayed thumbnail of data set in SampleData module GUI,
:param uris: Download URL(s).
:param fileNames: File name(s) that will be loaded.
:param nodeNames: Node name(s) in the scene.
:param customDownloader: Custom function for downloading.
:param loadFileType: file format name(s) ('VolumeFile' by default).
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param loadFileProperties: custom properties passed to the IO plugin.
"""
try:
slicer.modules.sampleDataSources
except AttributeError:
slicer.modules.sampleDataSources = {}
if category not in slicer.modules.sampleDataSources:
slicer.modules.sampleDataSources[category] = []
slicer.modules.sampleDataSources[category].append(AstroSampleDataSource(
sampleName=sampleName,
uris=uris,
fileNames=fileNames,
nodeNames=nodeNames,
thumbnailFileName=thumbnailFileName,
loadFileType=loadFileType,
loadFiles=loadFiles,
loadFileProperties=loadFileProperties
))
def __init__(self, logMessage=None):
if logMessage:
self.logMessage = logMessage
self.builtInCategoryName = 'BuiltIn'
self.developmentCategoryName = 'Development'
self.registerBuiltInAstroSampleDataSources()
def registerBuiltInAstroSampleDataSources(self):
"""Fills in the pre-define sample data sources"""
sourceArguments = (
('WEIN069', None, 'http://slicer.kitware.com/midas3/download/item/337752/WEIN069.fits', 'WEIN069.fits', 'WEIN069'),
('WEIN069_MASK', None, 'http://slicer.kitware.com/midas3/download/item/266403/WEIN069_mask.fits', 'WEIN069_mask.fits', 'WEIN069_mask'),
('NGC2403_DSS', None, 'http://slicer.kitware.com/midas3/download/item/365486/NGC2403_DSS.fits', 'NGC2403_DSS.fits', 'NGC2403_DSS'),
('NGC2403', None, 'http://slicer.kitware.com/midas3/download/item/359776/NGC2403.fits+%281%29', 'NGC2403.fits', 'NGC2403'),
('NGC4111', None, 'http://slicer.kitware.com/midas3/download/item/242880/NGC4111.fits', 'NGC4111.fits', 'NGC4111'),
('NGC3379', None, 'http://slicer.kitware.com/midas3/download/item/242866/NGC3379.fits', 'NGC3379.fits', 'NGC3379'),
)
if 'AstronomicalData' not in slicer.modules.sampleDataSources:
slicer.modules.sampleDataSources['AstronomicalData'] = []
for sourceArgument in sourceArguments:
slicer.modules.sampleDataSources['AstronomicalData'].append(AstroSampleDataSource(*sourceArgument))
def downloadFileIntoCache(self, uri, name):
"""Given a uri and and a filename, download the data into
a file of the given name in the scene's cache"""
destFolderPath = slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()
if not os.access(destFolderPath, os.W_OK):
try:
os.mkdir(destFolderPath)
except:
self.logMessage('<b>Failed to create cache folder %s</b>' % destFolderPath, logging.ERROR)
if not os.access(destFolderPath, os.W_OK):
self.logMessage('<b>Cache folder %s is not writable</b>' % destFolderPath, logging.ERROR)
return self.downloadFile(uri, destFolderPath, name)
def downloadSourceIntoCache(self, source):
"""Download all files for the given source and return a
list of file paths for the results"""
filePaths = []
for uri,fileName in zip(source.uris,source.fileNames):
filePaths.append(self.downloadFileIntoCache(uri, fileName))
return filePaths
def downloadFromSource(self,source,attemptCount=0):
"""Given an instance of AstroSampleDataSource, downloads the associated data and
load them into Slicer if it applies.
The function always returns a list.
Based on the fileType(s), nodeName(s) and loadFile(s) associated with
the source, different values may be appended to the returned list:
- if nodeName is specified, appends loaded nodes but if ``loadFile`` is False appends downloaded filepath
- if fileType is ``SceneFile``, appends downloaded filepath
- if fileType is ``ZipFile``, appends directory of extracted archive but if ``loadFile`` is False appends downloaded filepath
If no ``nodeNames`` and no ``fileTypes`` are specified or if ``loadFiles`` are all False,
returns the list of all downloaded filepaths.
"""
nodes = []
filePaths = []
for uri,fileName,nodeName,loadFile,loadFileType in zip(source.uris,source.fileNames,source.nodeNames,source.loadFiles,source.loadFileType):
current_source = AstroSampleDataSource(uris=uri, fileNames=fileName, nodeNames=nodeName, loadFiles=loadFile, loadFileType=loadFileType, loadFileProperties=source.loadFileProperties)
filePath = self.downloadFileIntoCache(uri, fileName)
filePaths.append(filePath)
if loadFileType == 'ZipFile':
if loadFile == False:
nodes.append(filePath)
continue
outputDir = slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory() + "/" + os.path.splitext(os.path.basename(filePath))[0]
qt.QDir().mkpath(outputDir)
success = slicer.util.extractArchive(filePath, outputDir)
if not success and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
outputDir = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(outputDir)
elif loadFileType == 'SceneFile':
if not loadFile:
nodes.append(filePath)
continue
success = self.loadScene(filePath, source.loadFileProperties)
if not success and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
filePath = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(filePath)
elif nodeName:
if loadFile == False:
nodes.append(filePath)
continue
loadedNode = self.loadNode(filePath, nodeName, loadFileType, source.loadFileProperties)
if loadedNode is None and attemptCount < 5:
file = qt.QFile(filePath)
if not file.remove():
self.logMessage('<b>Load failed! Unable to delete and try again loading %s!</b>' % filePath, logging.ERROR)
nodes.append(None)
break
attemptCount += 1
self.logMessage('<b>Load failed! Trying to download again (%d of 5 attempts)...</b>' % (attemptCount), logging.ERROR)
loadedNode = self.downloadFromSource(current_source,attemptCount)[0]
nodes.append(loadedNode)
if nodes:
return nodes
else:
return filePaths
def sourceForSampleName(self,sampleName):
"""For a given sample name this will search the available sources.
Returns SampleDataSource instance."""
for category in slicer.modules.sampleDataSources.keys():
for source in slicer.modules.sampleDataSources[category]:
if sampleName == source.sampleName:
return source
return None
def downloadFromURL(self, uris=None, fileNames=None, nodeNames=None, loadFiles=None,
customDownloader=None, loadFileTypes=None, loadFileProperties={}):
"""Download and optionally load data into the application.
:param uris: Download URL(s).
:param fileNames: File name(s) that will be downloaded (and loaded).
:param nodeNames: Node name(s) in the scene.
:param loadFiles: Boolean indicating if file(s) should be loaded. By default, the function decides.
:param customDownloader: Custom function for downloading.
:param loadFileTypes: file format name(s) ('VolumeFile' by default).
:param loadFileProperties: custom properties passed to the IO plugin.
If the given ``fileNames`` are not found in the application cache directory, they
are downloaded using the associated URIs.
See ``slicer.mrmlScene.GetCacheManager().GetRemoteCacheDirectory()``
If not explicitly provided or if set to ``None``, the ``loadFileTypes`` are
guessed based on the corresponding filename extensions.
If a given fileName has the ``.mrb`` or ``.mrml`` extension, it will **not** be loaded
by default. To ensure the file is loaded, ``loadFiles`` must be set.
The ``loadFileProperties`` are common for all files. If different properties
need to be associated with files of different types, downloadFromURL must
be called for each.
"""
return self.downloadFromSource(AstroSampleDataSource(
uris=uris, fileNames=fileNames, nodeNames=nodeNames, loadFiles=loadFiles,
loadFileType=loadFileTypes, loadFileProperties=loadFileProperties
))
def downloadSample(self,sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the first loaded node."""
return self.downloadSamples(sampleName)[0]
def downloadSamples(self,sampleName):
"""For a given sample name this will search the available sources
and load it if it is available. Returns the loaded nodes."""
source = self.sourceForSampleName(sampleName)
nodes = []
if source:
nodes = self.downloadFromSource(source)
return nodes
def logMessage(self,message):
print(message)
def humanFormatSize(self,size):
""" from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size"""
for x in ['bytes','KB','MB','GB']:
if size < 1024.0 and size > -1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return "%3.1f %s" % (size, 'TB')
def reportHook(self,blocksSoFar,blockSize,totalSize):
# we clamp to 100% because the blockSize might be larger than the file itself
percent = min(int((100. * blocksSoFar * blockSize) / totalSize), 100)
if percent == 100 or (percent - self.downloadPercent >= 10):
# we clamp to totalSize when blockSize is larger than totalSize
humanSizeSoFar = self.humanFormatSize(min(blocksSoFar * blockSize, totalSize))
humanSizeTotal = self.humanFormatSize(totalSize)
self.logMessage('<i>Downloaded %s (%d%% of %s)...</i>' % (humanSizeSoFar, percent, humanSizeTotal))
self.downloadPercent = percent
def downloadFile(self, uri, destFolderPath, name):
filePath = destFolderPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
import urllib.request, urllib.parse, urllib.error
self.logMessage('<b>Requesting download</b> <i>%s</i> from %s...' % (name, uri))
# add a progress bar
self.downloadPercent = 0
try:
urllib.request.urlretrieve(uri, filePath, self.reportHook)
self.logMessage('<b>Download finished</b>')
except IOError as e:
self.logMessage('<b>\tDownload failed: %s</b>' % e, logging.ERROR)
else:
self.logMessage('<b>File already exists in cache - reusing it.</b>')
return filePath
def loadScene(self, uri, fileProperties = {}):
self.logMessage('<b>Requesting load</b> %s...' % uri)
fileProperties['fileName'] = uri
success = slicer.app.coreIOManager().loadNodes('SceneFile', fileProperties)
if not success:
self.logMessage('<b>\tLoad failed!</b>', logging.ERROR)
return False
self.logMessage('<b>Load finished</b>')
return True
def loadNode(self, uri, name, fileType = 'VolumeFile', fileProperties = {}):
self.logMessage('<b>Requesting load</b> <i>%s</i> from %s...' % (name, uri))
fileProperties['fileName'] = uri
fileProperties['name'] = name
fileProperties['center'] = True
if "mask" in name:
fileProperties['labelmap'] = True
firstLoadedNode = None
loadedNodes = vtk.vtkCollection()
success = slicer.app.coreIOManager().loadNodes(fileType, fileProperties, loadedNodes)
if not success or loadedNodes.GetNumberOfItems()<1:
self.logMessage('<b>\tLoad failed!</b>', logging.ERROR)
return None
self.logMessage('<b>Load finished</b>')
# since nodes were read from a temp directory remove the storage nodes
for i in range(loadedNodes.GetNumberOfItems()):
loadedNode = loadedNodes.GetItemAsObject(i)
if not loadedNode.IsA("vtkMRMLStorableNode"):
continue
storageNode = loadedNode.GetStorageNode()
if not storageNode:
continue
slicer.mrmlScene.RemoveNode(storageNode)
loadedNode.SetAndObserveStorageNodeID(None)
return loadedNodes.GetItemAsObject(0)
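#
# Illustrative sketch (not part of the module): how another scripted module could
# register and then download its own astronomical sample data set, following the
# AstroSampleDataLogic docstring above. Category, URL, file and node names are
# hypothetical placeholders.
#
def _exampleRegisterAndDownloadAstroSample():
  AstroSampleDataLogic.registerCustomSampleDataSource(
    category='AstronomicalData',
    sampleName='ExampleGalaxy',
    uris='http://example.org/data/ExampleGalaxy.fits',
    fileNames='ExampleGalaxy.fits',
    nodeNames='ExampleGalaxy')
  # downloadSample() searches all registered sources for the name and returns
  # the first loaded node.
  return AstroSampleDataLogic().downloadSample('ExampleGalaxy')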
|
#nothing
from numpy.testing import Tester
test = Tester().test
|
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"api_end_point": "apiEndPoint",
"api_version": "apiVersion",
"application_protocol": "applicationProtocol",
"cpu_limit": "cpuLimit",
"data_amount": "dataAmount",
"data_unit": "dataUnit",
"docker_image": "dockerImage",
"end_ip": "endIp",
"environment_variables": "environmentVariables",
"header_condition": "headerCondition",
"header_name": "headerName",
"header_value": "headerValue",
"ingress_hostname": "ingressHostname",
"ip_condition": "ipCondition",
"max_replicas": "maxReplicas",
"memory_limit": "memoryLimit",
"min_replicas": "minReplicas",
"req_memory": "reqMemory",
"request_count": "requestCount",
"request_cpu": "requestCPU",
"security_config": "securityConfig",
"specific_ip": "specificIp",
"start_ip": "startIp",
"stop_on_quota_reach": "stopOnQuotaReach",
"swagger_configmap_names": "swaggerConfigmapNames",
"target_port": "targetPort",
"time_unit": "timeUnit",
"unit_time": "unitTime",
"update_time_stamp": "updateTimeStamp",
"validate_allowed_ap_is": "validateAllowedAPIs",
"validate_subscription": "validateSubscription",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"apiEndPoint": "api_end_point",
"apiVersion": "api_version",
"applicationProtocol": "application_protocol",
"cpuLimit": "cpu_limit",
"dataAmount": "data_amount",
"dataUnit": "data_unit",
"dockerImage": "docker_image",
"endIp": "end_ip",
"environmentVariables": "environment_variables",
"headerCondition": "header_condition",
"headerName": "header_name",
"headerValue": "header_value",
"ingressHostname": "ingress_hostname",
"ipCondition": "ip_condition",
"maxReplicas": "max_replicas",
"memoryLimit": "memory_limit",
"minReplicas": "min_replicas",
"reqMemory": "req_memory",
"requestCount": "request_count",
"requestCPU": "request_cpu",
"securityConfig": "security_config",
"specificIp": "specific_ip",
"startIp": "start_ip",
"stopOnQuotaReach": "stop_on_quota_reach",
"swaggerConfigmapNames": "swagger_configmap_names",
"targetPort": "target_port",
"timeUnit": "time_unit",
"unitTime": "unit_time",
"updateTimeStamp": "update_time_stamp",
"validateAllowedAPIs": "validate_allowed_ap_is",
"validateSubscription": "validate_subscription",
}
|
import logging
import os
from src.database import Database
from src.irc import TwitchBot
from src.objects.match import Match
from src.objects.waifu import (
LockedBetMessage,
OpenBetExhibitionMessage,
OpenBetMessage,
WinMessage,
)
logger = logging.getLogger(__name__)
FAIL_HOURS = 6
def run() -> None:
username = _get_environment_variable("USERNAME")
oauth_token = _get_environment_variable("OAUTH_TOKEN")
salty_db = Database(_get_environment_variable("DATABASE_PATH"))
irc_bot = TwitchBot(username, oauth_token)
current_match = None
for message in irc_bot.listen():
if isinstance(message, OpenBetMessage):
logger.info(
"New match. %s VS. %s. Tier: %s. Format: %s.",
message.fighter_red,
message.fighter_blue,
message.tier,
message.match_format,
)
salty_db.update_current_match(
fighter_red=message.fighter_red,
fighter_blue=message.fighter_blue,
tier=message.tier,
match_format=message.match_format,
)
if message.match_format != "exhibition":
current_match = Match(message)
else:
current_match = None
elif isinstance(message, OpenBetExhibitionMessage):
logger.info(
"New match. %s VS. %s. Format: exhibition",
message.fighter_red,
message.fighter_blue,
)
salty_db.update_current_match(
fighter_red=message.fighter_red,
fighter_blue=message.fighter_blue,
match_format="exhibition",
)
current_match = None
elif current_match:
if isinstance(message, LockedBetMessage):
success = current_match.update_locked(message)
if success:
logger.info(
"Bets locked. %s ($%s). %s ($%s).",
message.fighter_red,
message.bet_red,
message.fighter_blue,
message.bet_blue,
)
elif isinstance(message, WinMessage):
success = current_match.update_winner(message)
if success:
logger.info("Winner: %s.", message.winner)
try:
salty_db.new_match(current_match)
except ValueError:
logger.error(
"Failed to log current match. %s",
current_match.__dict__,
exc_info=True,
)
def _get_environment_variable(env_var_name: str) -> str:
    env_var = os.environ.get(env_var_name)
if not env_var:
raise ValueError(f"Missing environment variable {env_var_name}")
return env_var
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='datamanagerpkg',
version='0.1',
description='python package to allow communication between Ion Proton and Galaxy',
long_description=readme(),
author='William Digan',
author_email='william.digan@aphp.fr',
license='MIT',
packages=['datamanagerpkg'],
zip_safe=False)
#~ install_requires=[
#~ 'markdown',
#~ ],
#~ include_package_data=True,
|
from fastapi import FastAPI,Request
from fastapi.testclient import TestClient
from main.app import app
from fastapi.responses import PlainTextResponse
import requests
import requests_mock
import sys
sys.path.insert(0, '/home/buinhuphu/Dropbox/FastAPI/covid-chatbot')
import settings
client = TestClient(app)
def test_health():
response = client.get("/stats/health-check")
assert response.status_code == 200
assert response.json() == {"Message":'healthy stats endpoint'}
def test_getStatsUs(requests_mock):
payload = [{
"country": "USA",
"confirmed": 1066878,
"recovered": 147473,
"critical": 18221,
"deaths": 61797,
"latitude": 37.09024,
"longitude": -95.712891
}]
requests_mock.get(settings.BASE_URL + "/stats/usa", json=payload)
assert requests.get(settings.BASE_URL + "/stats/usa").status_code == 200
assert requests.get(settings.BASE_URL + "/stats/usa").json() == payload
def test_getStatsUk(requests_mock):
payload = [{
"country": "UK",
"confirmed": 1066878,
"recovered": 147473,
"critical": 18221,
"deaths": 61797,
"latitude": 37.09024,
"longitude": -95.712891
}]
requests_mock.get(settings.BASE_URL + "/stats/uk", json=payload)
assert requests.get(settings.BASE_URL + "/stats/uk").status_code == 200
assert requests.get(settings.BASE_URL + "/stats/uk").json() == payload |
# Copyright 2021 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# E. van den Berg, "Efficient Bayesian phase estimation using mixed priors"
# arXiv:2007.11629.
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
import bpe
from generic import *
# ----------------------------------------------------------------------
# N2 - Decoherence - Three eigenstates
# ----------------------------------------------------------------------
#instancesA = loadInstances('./cache/experiment_hybrid_87.dat')
#for i in range(len(instancesA)) : instancesA[i].collateMaxWeight(matchIter=1000)
#plotCollatedMu(instancesA,'-',alpha=1,color='C1')
# Purely adaptive -- fail
#instancesA = loadInstances('./cache/experiment_hybrid_113.dat')
#instancesA = loadInstances('./cache/experiment_hybrid_115.dat')
#instancesA = loadInstances('./cache/experiment_hybrid_117.dat')
# Adaptive cyclic -- succeed
#instancesA = loadInstances('./cache/experiment_hybrid_114.dat')
#plotCollatedMu(instancesA,'-',alpha=0.4,color=(1,0,0))
#instancesA = loadInstances('./cache/experiment_hybrid_116.dat')
#plotCollatedMu(instancesA,'-',alpha=0.7,color=(1,0,0))
#instancesA = loadInstances('./cache/experiment_hybrid_118.dat')
#plotCollatedMu(instancesA,'-',alpha=1.0,color=(1,0,0))
# Cyclic with cmax values 50, 80, and 100:
#instancesA = loadInstances('./cache/experiment_hybrid_119.dat')
#instancesA = loadInstances('./cache/experiment_hybrid_120.dat')
#instancesA = loadInstances('./cache/experiment_hybrid_121.dat')
#instancesA = loadInstances('./cache/experiment_hybrid_122.dat') # Same as #120 but with kMax=1000
#instancesA = loadInstances('./cache/experiment_hybrid_123.dat') # Same as #121 but with kMax=1000
# Cyclic with cmax=10
#instancesA = loadInstances('./cache/experiment_hybrid_110.dat')
#plotCollatedMu(instancesA,'-',alpha=0.3,color='C0')
# Cyclic with cmax values 20 and 50
instancesA = loadInstances('./cache/experiment_hybrid_111.dat')
h1=plotCollatedMu(instancesA,'-',alpha=0.3,color='C0')
instancesA = loadInstances('./cache/experiment_hybrid_112.dat')
h2=plotCollatedMu(instancesA,'-',alpha=0.6,color='C1')
# Cyclic with cmax=100, and kmax values 200 and 1000
instancesA = loadInstances('./cache/experiment_hybrid_121.dat')
h3=plotCollatedMu(instancesA,'--',alpha=1.0,color='C2')
instancesA = loadInstances('./cache/experiment_hybrid_123.dat')
h4=plotCollatedMu(instancesA,'-',alpha=1.0,color='C2')
plt.legend([h1[0],h2[0],h3[0],h4[0]],
['Cyclic, $c_{\max}$ = 20',
'Cyclic, $c_{\max}$ = 50',
'Cyclic, $c_{\max}$ = 100, $n_{\max}$ = 200',
'Cyclic, $c_{\max}$ = 100, $n_{\max}$ = 1000'],loc='lower left', fontsize=12)
plt.xlabel('Iteration', fontsize=fontsize)
plt.ylabel('Median phase error', fontsize=fontsize)
plt.xlim([1e1,1e6])
plt.ylim([3e-6,4])
setTickFontsize(fontsize)
exportFigure("FigExperimentHybrid_N2", label='5b')
|
from flask_frozen import Freezer
from main import app
freezer = Freezer(app)
if __name__ == '__main__':
freezer.freeze() |
from __future__ import print_function
import glob2
import os
import warnings
import textract
import requests
import parser1
from flask import (Flask, json, Blueprint, jsonify, redirect, render_template, request,
url_for)
from gensim.summarization import summarize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
from werkzeug.utils import secure_filename
import pdf2txt as pdf
import PyPDF2
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
class ResultElement:
def __init__(self, rank, filename):
self.rank = rank
self.filename = filename
def getfilepath(loc):
temp = str(loc)
temp = temp.replace('\\', '/')
return temp
def res(jobfile):
Resume_Vector = []
Ordered_list_Resume = []
Ordered_list_Resume_Score = []
LIST_OF_FILES = []
LIST_OF_FILES_PDF = []
LIST_OF_FILES_DOC = []
LIST_OF_FILES_DOCX = []
Resumes = []
    Temp_pdf = ''
print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
os.chdir('./Original_Resumes')
for file in glob2.glob('**/*.pdf', recursive=True):
LIST_OF_FILES_PDF.append(file)
for file in glob2.glob('**/*.doc', recursive=True):
LIST_OF_FILES_DOC.append(file)
for file in glob2.glob('**/*.docx', recursive=True):
LIST_OF_FILES_DOCX.append(file)
LIST_OF_FILES = LIST_OF_FILES_DOC + LIST_OF_FILES_DOCX + LIST_OF_FILES_PDF
# LIST_OF_FILES.remove("antiword.exe")
print("This is LIST OF FILES")
print(LIST_OF_FILES)
# print("Total Files to Parse\t" , len(LIST_OF_PDF_FILES))
print("####### PARSING ########")
for nooo,i in enumerate(LIST_OF_FILES):
Ordered_list_Resume.append(i)
Temp = i.split(".")
if Temp[1] == "pdf" or Temp[1] == "Pdf" or Temp[1] == "PDF":
try:
print("This is PDF" , nooo)
with open(i,'rb') as pdf_file:
read_pdf = PyPDF2.PdfFileReader(pdf_file)
# page = read_pdf.getPage(0)
# page_content = page.extractText()
# Resumes.append(Temp_pdf)
number_of_pages = read_pdf.getNumPages()
for page_number in range(number_of_pages):
page = read_pdf.getPage(page_number)
page_content = page.extractText()
page_content = page_content.replace('\n', ' ')
# page_content.replace("\r", "")
Temp_pdf = str(Temp_pdf) + str(page_content)
# Temp_pdf.append(page_content)
# print(Temp_pdf)
Resumes.extend([Temp_pdf])
Temp_pdf = ''
# f = open(str(i)+str("+") , 'w')
# f.write(page_content)
# f.close()
except Exception as e: print(e)
if Temp[1] == "doc" or Temp[1] == "Doc" or Temp[1] == "DOC":
print("This is DOC" , i)
try:
a = textract.process(i)
a = a.replace(b'\n', b' ')
a = a.replace(b'\r', b' ')
b = str(a)
c = [b]
Resumes.extend(c)
except Exception as e: print(e)
if Temp[1] == "docx" or Temp[1] == "Docx" or Temp[1] == "DOCX":
print("This is DOCX" , i)
try:
a = textract.process(i)
a = a.replace(b'\n', b' ')
a = a.replace(b'\r', b' ')
b = str(a)
c = [b]
Resumes.extend(c)
except Exception as e: print(e)
if Temp[1] == "ex" or Temp[1] == "Exe" or Temp[1] == "EXE":
print("This is EXE" , i)
pass
print("Done Parsing.")
Job_Desc = 0
LIST_OF_TXT_FILES = []
os.chdir('../Job_Description')
f = open(jobfile , 'r')
text = f.read()
#print text
#print(text)
text.encode('utf-8').strip()
try:
tttt = str(text)
#print(1)
#print(tttt)
#tttt = summarize(tttt, word_count=100)
#print(2)
#print(tttt)
text = [tttt]
except:
text = ['None']
f.close()
vectorizer = TfidfVectorizer(stop_words='english')
#print("Text is ",text)
#text.encode('utf-8').strip()
vectorizer.fit(text)
vector = vectorizer.transform(text)
#print("Vector is ",vector)
Job_Desc = vector.toarray()
# print("\n\n")
#print("This is job desc : ",Job_Desc)
os.chdir('../')
for i in Resumes:
text = i
tttt = str(text)
print("i before is",i)
#tttt = unicode(tttt,'UTF-8')
log = open("/home/i346303/Automated-Resume-Screening-System-master-final/test.txt", "w")
print(tttt, file = log)
#cmd = "python parser1.py {0} '{1}'".format('config.xml',i)
tttt = parser1.main1('config.xml',tttt)
print("New TTTT is ",tttt)
#tttt = os.system(cmd)
#print("TTTT after is",tttt)
#sys.stdout = open("/home/i346303/Automated-Resume-Screening-System-master/test.txt", "a+")
#print ("text sys.stdout")
try:
#tttt = summarize(tttt, word_count=100)
text = [tttt]
vector = vectorizer.transform(text)
aaa = vector.toarray()
Resume_Vector.append(vector.toarray())
#print('try')
except:
#print('except')
pass
#print(Resume_Vector)
#print(Ordered_list_Resume,Ordered_list_Resume_Score)
j = 1
for i in Resume_Vector:
samples = i
print("value of i = ",i)
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(samples)
print("value of samples = ",samples)
NearestNeighbors(algorithm='auto', leaf_size=50)
Ordered_list_Resume_Score.extend(neigh.kneighbors(Job_Desc)[0][0].tolist())
print("Job Describtion is ",(Job_Desc)[0][0])
Z = [x for _,x in sorted(zip(Ordered_list_Resume_Score,Ordered_list_Resume))]
#Z_Filter
Y = [x for _,x in sorted(zip(Ordered_list_Resume_Score,Ordered_list_Resume_Score))]
#,reverse=True
print("Value of z is",Z)
print("Value of y is",Y)
print(Ordered_list_Resume)
print(Ordered_list_Resume_Score)
flask_return = []
# for n,i in enumerate(Z):
# print("Rankkkkk\t" , n+1, ":\t" , i)
#for index, (value1, value2) in enumerate(zip(data1, data2)):
# print index, value1 + value2
for i, (n,j) in enumerate(zip(Z,Y)):
# print("Rank\t" , n+1, ":\t" , i)
# flask_return.append(str("Rank\t" , n+1, ":\t" , i))
print("Value of N is",n)
print("Value of i is",i)
print("Value of j is",j)
name = getfilepath(n)
#name = name.split('.')[0]
rank = i+1
score = j
if (score < 0.97):
res = ResultElement((str(rank) + ' --- ' + str(score)) ,name)
flask_return.append(res)
#flask_return.append(score)
# res.printresult()
# print(f"Rank{res.rank+1} :\t {res.filename}")
return flask_return
if __name__ == '__main__':
inputStr = input("")
    res(inputStr)
|
from setuptools import setup, find_packages
with open('README.rst') as fp:
long_description = fp.read()
setup(
name='roviclient',
version='0.1.0',
description='A simple Python client library for the Rovi API',
long_description=long_description,
author='Devin Sevilla',
author_email='dasevilla@gmail.com',
url='https://github.com/dasevilla/rovi-python',
download_url='https://github.com/dasevilla/rovi-python/tarball/master',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
install_requires=[
'requests>=1.2.0',
],
packages=find_packages(),
)
|
"http://cms.mpi.univie.ac.at/vasp/vasp/POSCAR_file.html"
from __future__ import absolute_import, division, print_function
from libtbx import slots_getstate_setstate
from six.moves import range
from six.moves import zip
class reader(slots_getstate_setstate):
__slots__ = """
title
scale
lattice_vectors
types
type_counts
sites
""".split()
def __init__(O, lines, source_info=None):
assert len(lines) >= 7
O.title = lines[0]
scale_str = lines[1].split()
assert len(scale_str) == 1
O.scale = float(scale_str[0])
O.lattice_vectors = []
for i in [2,3,4]:
vec_str = lines[i].split()
assert len(vec_str) == 3
vec = [float(_) for _ in vec_str]
O.lattice_vectors.append(vec)
i_type_counts = 5
type_counts_str = lines[i_type_counts].split()
assert len(type_counts_str) > 0
_ = type_counts_str[0]
try:
int(_)
except ValueError:
O.types = type_counts_str
i_type_counts = 6
type_counts_str = lines[i_type_counts].split()
assert len(type_counts_str) == len(O.types)
else:
O.types = None
O.type_counts = [int(_) for _ in type_counts_str]
key = lines[i_type_counts+1].strip()
if (key != "Direct"):
from libtbx.utils import Sorry
if (source_info is not None):
from libtbx.str_utils import show_string
s = " of %s" % show_string(source_info)
else:
s = ""
raise Sorry('POSCAR "%s" is not supported (line %d%s).'
% (key, i_type_counts+1+1, s))
n_sites = sum(O.type_counts)
assert len(lines) >= i_type_counts+2+n_sites
O.sites = []
for i in range(i_type_counts+2, i_type_counts+2+n_sites):
site_str = lines[i].split()
assert len(site_str) >= 3
site = [float(_) for _ in site_str[:3]]
O.sites.append(site)
def unit_cell(O):
from scitbx import matrix
vecs = [matrix.col(_) for _ in O.lattice_vectors]
params = [_.length() for _ in vecs]
for i in range(3):
params.append(vecs[(i+1)%3].angle(vecs[(i+2)%3], deg=True))
from cctbx import uctbx
return uctbx.unit_cell(params)
def make_up_types_if_necessary(O, default="Si"):
if (O.types is None):
O.types = [default]*len(O.type_counts)
return O
def scatterers(O, u_iso=0):
assert O.types is not None
from cctbx import xray
from cctbx.array_family import flex
result = flex.xray_scatterer()
sites = iter(O.sites)
for type,count in zip(O.types, O.type_counts):
for _ in range(count):
result.append(xray.scatterer(
label="%s%d"%(type, len(result)+1),
scattering_type=type,
site=next(sites),
u=u_iso))
assert len(result) == len(O.sites)
return result
def xray_structure(O, u_iso=0):
from cctbx import xray
from cctbx import crystal
return xray.structure(
crystal_symmetry=crystal.symmetry(
unit_cell=O.unit_cell(), space_group_symbol="P1"),
scatterers=O.scatterers(u_iso=u_iso))
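# Illustrative sketch (not part of the module), assuming a "Direct"-format POSCAR
# file on disk; the file name is a hypothetical placeholder.
def example_xray_structure_from_poscar(file_name="POSCAR"):
  with open(file_name) as f:
    lines = f.read().splitlines()
  poscar = reader(lines, source_info=file_name)
  return poscar.make_up_types_if_necessary().xray_structure()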
|
from copy import deepcopy
from typing import NamedTuple, List
from collections import Counter
class Point(NamedTuple):
x: int
y: int
def __add__(self, other: 'Point'):
return Point(self.x + other.x, self.y + other.y)
grid = []
with open('01.txt') as f:
for line in f.readlines():
grid.append(list(line.strip()))
def iterate_lights(old_grid: List[List[str]]) -> List[List[str]]:
    x_max = len(old_grid[0])
    y_max = len(old_grid)
directions = [Point(x, y) for x in (-1, 0, 1) for y in (-1, 0, 1) if Point(x, y) != Point(0, 0)]
new_grid = deepcopy(old_grid)
search_area = [Point(x, y) for y in range(y_max) for x in range(x_max)]
for p in search_area:
lights = Counter([old_grid[sp.y][sp.x] for sp in
filter(lambda dp: 0 <= dp.x < x_max and 0 <= dp.y < y_max, [p + d for d in directions])])
if new_grid[p.y][p.x] == '#':
if 2 <= lights['#'] <= 3:
new_grid[p.y][p.x] = '#'
else:
new_grid[p.y][p.x] = '.'
else:
if lights['#'] == 3:
new_grid[p.y][p.x] = '#'
return new_grid
def iterate_lights_part2(old_grid: List[List[str]]) -> List[List[str]]:
    x_max = len(old_grid[0])
    y_max = len(old_grid)
old_grid[0][0] = '#'
old_grid[0][x_max-1] = '#'
old_grid[y_max-1][0] = '#'
old_grid[y_max-1][x_max-1] = '#'
directions = [Point(x, y) for x in (-1, 0, 1) for y in (-1, 0, 1) if Point(x, y) != Point(0, 0)]
new_grid = deepcopy(old_grid)
search_area = [Point(x, y) for y in range(y_max) for x in range(x_max)]
for p in search_area:
lights = Counter([old_grid[sp.y][sp.x] for sp in
filter(lambda dp: 0 <= dp.x < x_max and 0 <= dp.y < y_max, [p + d for d in directions])])
if new_grid[p.y][p.x] == '#':
if 2 <= lights['#'] <= 3:
new_grid[p.y][p.x] = '#'
else:
new_grid[p.y][p.x] = '.'
else:
if lights['#'] == 3:
new_grid[p.y][p.x] = '#'
new_grid[0][0] = '#'
new_grid[0][x_max-1] = '#'
new_grid[y_max-1][0] = '#'
new_grid[y_max-1][x_max-1] = '#'
return new_grid
def print_grid(grid_data):
for data in grid_data:
print(''.join(data))
print()
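# Illustrative sketch (never called): one step of the Conway-style rules on a
# tiny hand-written grid, showing what iterate_lights does to a small
# neighbourhood. The 3x3 pattern is an arbitrary example, not puzzle input.
def _demo_single_step():
    tiny = [list('.#.'), list('..#'), list('###')]
    print_grid(iterate_lights(tiny))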
if __name__ == '__main__':
part1_grid = deepcopy(grid)
for _ in range(100):
part1_grid = iterate_lights(part1_grid)
g = []
for li in part1_grid:
g.extend(li)
print('Part 1:', Counter(g)['#'])
for _ in range(100):
grid = iterate_lights_part2(deepcopy(grid))
g = []
for li in grid:
g.extend(li)
print('Part 2:', Counter(g)['#'])
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.function.regression.ada_boost_regression import ada_boost_regression_train
from brightics.function.regression.ada_boost_regression import ada_boost_regression_predict
from brightics.common.datasets import load_iris
import unittest
import pandas as pd
import numpy as np
import HtmlTestRunner
import os
class ADABoostRegression(unittest.TestCase):
def setUp(self):
print("*** ADA Boost Regression UnitTest Start ***")
self.testdata = load_iris()
def tearDown(self):
print("*** ADA Boost Regression UnitTest End ***")
def test(self):
ada_train = ada_boost_regression_train(self.testdata, feature_cols=['sepal_length', 'sepal_width', 'petal_length', ], label_col='petal_width', random_state=12345)
ada_model = ada_train['model']['regressor']
estimator_weights = ada_model.estimator_weights_ if hasattr(ada_model, 'estimator_weights_') else None
estimator_errors = ada_model.estimator_errors_ if hasattr(ada_model, 'estimator_errors_') else None
feature_importances = ada_model.feature_importances_ if hasattr(ada_model, 'feature_importances_') else None
np.testing.assert_array_equal([round(x, 15) for x in estimator_weights], [1.413875270282188, 0.842690804421279, 0.745744569599211, 0.849855966774747, 0.873567992253140, 0.412870149785776, 0.735336038549665, 0.948762342244301, 0.119925737139696, 1.084352707003922, 0.215508369140552, 1.261341652523880, 0.693756215973489, 0.815705197279502, 0.107744343744492, 0.892721151562112, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000])
np.testing.assert_array_equal([round(x, 15) for x in estimator_errors], [0.195623543954738, 0.300968372309124, 0.321749243945297, 0.299463072654870, 0.294512419786350, 0.398224113163979, 0.324024864473794, 0.279133793052534, 0.470054447315167, 0.252683191393177, 0.446330465454831, 0.220743020672426, 0.333198005871043, 0.306676087054173, 0.473089941916390, 0.290548596637435, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000])
np.testing.assert_array_equal([round(x, 15) for x in feature_importances], [0.060563052789016, 0.057352925738117, 0.882084021472867])
predict = ada_boost_regression_predict(self.testdata, ada_train['model'])
species = predict['out_table']['species']
prediction = predict['out_table']['prediction']
np.testing.assert_array_equal(species, ['setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'setosa', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'versicolor', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica', 'virginica'])
np.testing.assert_array_equal([round(x, 15) for x in prediction], [0.271428571428571 , 0.194444444444444 , 0.205555555555556 , 0.194444444444444 , 0.271428571428571 , 0.375000000000000 , 0.233333333333333 , 0.247619047619048 , 0.194444444444444 , 0.194444444444444 , 0.275000000000000 , 0.250000000000000 , 0.194444444444444 , 0.194444444444444 , 0.300000000000000 , 0.275000000000000 , 0.275000000000000 , 0.271428571428571 , 0.400000000000000 , 0.271428571428571 , 0.357142857142857 , 0.271428571428571 , 0.271428571428571 , 0.316666666666667 , 0.250000000000000 , 0.271428571428571 , 0.271428571428571 , 0.271428571428571 , 0.247619047619048 , 0.230000000000000 , 0.230000000000000 , 0.247619047619048 , 0.257894736842105 , 0.270000000000000 , 0.194444444444444 , 0.233333333333333 , 0.300000000000000 , 0.194444444444444 , 0.194444444444444 , 0.247619047619048 , 0.271428571428571 , 0.205555555555556 , 0.205555555555556 , 0.366666666666667 , 0.366666666666667 , 0.194444444444444 , 0.316666666666667 , 0.205555555555556 , 0.275000000000000 , 0.233333333333333 , 1.494444444444444 , 1.494444444444444 , 1.933999999999999 , 1.205882352941176 , 1.460000000000000 , 1.460000000000000 , 1.494444444444444 , 1.100000000000000 , 1.494444444444444 , 1.205263157894737 , 1.100000000000000 , 1.350000000000000 , 1.205882352941176 , 1.494444444444444 , 1.242105263157894 , 1.421428571428571 , 1.494444444444444 , 1.205882352941176 , 1.460000000000000 , 1.205263157894737 , 1.620000000000000 , 1.242105263157894 , 1.626315789473684 , 1.460000000000000 , 1.357142857142857 , 1.421428571428571 , 1.823333333333333 , 1.933999999999999 , 1.494444444444444 , 1.093333333333333 , 1.160000000000000 , 1.160000000000000 , 1.205263157894737 , 1.823333333333333 , 1.494444444444444 , 1.494444444444444 , 1.494444444444444 , 1.251851851851852 , 1.280000000000000 , 1.205882352941176 , 1.251851851851852 , 1.494444444444444 , 1.205882352941176 , 1.100000000000000 , 1.242105263157894 , 1.350000000000000 , 1.280000000000000 , 1.357142857142857 , 1.100000000000000 , 1.242105263157894 , 2.197916666666667 , 1.907843137254902 , 1.998387096774192 , 1.933999999999999 , 1.998387096774192 , 1.998387096774192 , 1.494444444444444 , 1.998387096774192 , 1.933999999999999 , 2.197916666666667 , 1.998387096774192 , 1.907843137254902 , 1.998387096774192 , 1.907843137254902 , 1.985882352941177 , 1.998701298701298 , 1.998387096774192 , 2.197916666666667 , 1.985882352941177 , 1.823333333333333 , 2.012903225806451 , 1.823333333333333 , 1.998387096774192 , 1.626315789473684 , 2.197916666666667 , 2.012903225806451 , 1.592000000000000 , 1.823333333333333 , 1.985882352941177 , 1.998387096774192 , 1.998387096774192 , 2.197916666666667 , 1.985882352941177 , 1.875342465753425 , 1.825000000000001 , 1.998387096774192 , 2.127500000000000 , 1.998701298701298 , 1.620000000000000 , 1.998701298701298 , 1.998701298701298 , 1.998387096774192 , 1.907843137254902 , 2.012903225806451 , 2.197916666666667 , 1.985882352941177 , 1.823333333333333 , 1.985882352941177 , 2.127500000000000 , 1.933999999999999])
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
reportFoler = filepath + "/../../../../../../../reports"
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFoler))
|
import torch
import torch.nn as nn
lstm = nn.LSTM(300, 100, 2,bidirectional=True)
x = torch.randn(7, 16, 300)
h0 = torch.randn(4, 16, 100)
c0 = torch.randn(4, 16, 100)
output, (hn, cn)=lstm(x, (h0, c0))
print('output {}'.format(output.size()))
print('hn {}'.format(hn.size()))
print('cn {}'.format(cn.size()))
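# For this 2-layer bidirectional LSTM (batch_first=False) the printed sizes are:
#   output: (seq_len=7, batch=16, 2 * hidden_size = 200)
#   hn, cn: (num_layers * num_directions = 4, batch=16, hidden_size = 100)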
modellist=nn.ModuleList([nn.Linear(10,10) for i in range(10)])
for i in range(len(modellist)):
print(modellist[i])
|
"""
Terraform UCTT provisioner plugin
"""
import logging
import json
import os
import shutil
import subprocess
from typing import Dict, List, Any
from configerus.loaded import LOADED_KEY_ROOT
from configerus.contrib.jsonschema.validate import PLUGIN_ID_VALIDATE_JSONSCHEMA_SCHEMA_CONFIG_LABEL
from configerus.validator import ValidationError
from uctt.plugin import UCTTPlugin, Type
from uctt.fixtures import Fixtures, UCCTFixturesPlugin, UCTT_FIXTURES_CONFIG_FIXTURES_LABEL
from uctt.provisioner import ProvisionerBase
from uctt.output import OutputBase
from uctt.contrib.common import UCTT_PLUGIN_ID_OUTPUT_DICT, UCTT_PLUGIN_ID_OUTPUT_TEXT
logger = logging.getLogger('uctt.contrib.provisioner:terraform')
TERRAFORM_PROVISIONER_CONFIG_LABEL = 'terraform'
""" config label loading the terraform config """
TERRAFORM_PROVISIONER_CONFIG_ROOT_PATH_KEY = 'root.path'
""" config key for a base path that should be used for any relative paths """
TERRAFORM_PROVISIONER_CONFIG_PLAN_PATH_KEY = 'plan.path'
""" config key for the terraform plan path """
TERRAFORM_PROVISIONER_CONFIG_STATE_PATH_KEY = 'state.path'
""" config key for the terraform state path """
TERRAFORM_PROVISIONER_CONFIG_VARS_KEY = 'vars'
""" config key for the terraform vars Dict, which will be written to a file """
TERRAFORM_PROVISIONER_CONFIG_VARS_PATH_KEY = 'vars_path'
""" config key for the terraform vars file path, where the plugin will write to """
TERRAFORM_PROVISIONER_DEFAULT_VARS_FILE = 'uctt_terraform.tfvars.json'
""" Default vars file if none was specified """
TERRAFORM_PROVISIONER_DEFAULT_STATE_SUBPATH = 'mtt-state'
""" Default vars file if none was specified """
TERRAFORM_VALIDATE_JSONSCHEMA = {
'type': 'object',
'properties': {
'type': {'type': 'string'},
'plugin_id': {'type': 'string'},
'root': {
'type': 'object',
'properties': {
'path': {'type': 'string'}
}
},
'plan': {
'type': 'object',
'properties': {
'path': {'type': 'string'}
},
'required': ['path']
},
'state': {
'type': 'object',
'properties': {
'path': {'type': 'string'}
}
},
'vars_path': {
'type': 'string'
},
'vars': {
'type': 'object'
}
}
}
""" Validation jsonschema for terraform config contents """
TERRAFORM_VALIDATE_TARGET = "{}:{}".format(
PLUGIN_ID_VALIDATE_JSONSCHEMA_SCHEMA_CONFIG_LABEL,
TERRAFORM_PROVISIONER_CONFIG_LABEL)
""" configerus validation target to matche the above config, which relates to the bootstrap in __init__.py """
class TerraformProvisionerPlugin(ProvisionerBase, UCCTFixturesPlugin):
""" Terraform provisioner plugin
Provisioner plugin that allows control of and interaction with a terraform
cluster.
## Requirements
1. this plugin uses subprocess to call a terraform binary, so you have to install
terraform in the environment
## Usage
### Plan
The plan must exists somewhere on disk, and be accessible.
You must specify the path and related configuration in config, which are read
in the .prepare() execution.
### Vars/State
This plugin reads TF vars from config and writes them to a vars file. We
could run without relying on vars file, but having a vars file allows cli
interaction with the cluster if this plugin messes up.
You can override where Terraform vars/state files are written to allow sharing
of a plan across test suites.
"""
def __init__(self, environment, instance_id,
label: str = TERRAFORM_PROVISIONER_CONFIG_LABEL, base: Any = LOADED_KEY_ROOT):
""" Run the super constructor but also set class properties
Interpret provided config and configure the object with all of the needed
pieces for executing terraform commands
"""
super(ProvisionerBase, self).__init__(environment, instance_id)
logger.info("Preparing Terraform setting")
self.terraform_config_label = label
""" configerus load label that should contain all of the config """
self.terraform_config_base = base
""" configerus get key that should contain all tf config """
self.terraform_config = self.environment.config.load(
self.terraform_config_label)
""" get a configerus LoadedConfig for the terraform label """
# Run confgerus validation on the config using our above defined
# jsonschema
try:
self.terraform_config.get(
base, validator=TERRAFORM_VALIDATE_TARGET)
except ValidationError as e:
raise ValueError(
"Terraform config failed validation: {}".format(e)) from e
fixtures = self.environment.add_fixtures_from_config(
label=self.terraform_config_label,
base=[self.terraform_config_base, UCTT_FIXTURES_CONFIG_FIXTURES_LABEL])
""" All fixtures added to this provisioner plugin. """
UCCTFixturesPlugin.__init__(self, fixtures)
self.root_path = self.terraform_config.get([self.terraform_config_base, TERRAFORM_PROVISIONER_CONFIG_ROOT_PATH_KEY],
exception_if_missing=False)
""" all relative paths will have this joined as their base """
self.working_dir = self.terraform_config.get([self.terraform_config_base, TERRAFORM_PROVISIONER_CONFIG_PLAN_PATH_KEY],
exception_if_missing=False)
""" all subprocess commands for terraform will be run in this path """
if not self.working_dir:
raise ValueError(
"Plugin config did not give us a working/plan path: {}".format(self.terraform_config.data))
if not os.path.isabs(self.working_dir):
if self.root_path:
self.working_dir = os.path.join(
self.root_path, self.working_dir)
self.working_dir = os.path.abspath(self.working_dir)
state_path = self.terraform_config.get([self.terraform_config_base, TERRAFORM_PROVISIONER_CONFIG_STATE_PATH_KEY],
exception_if_missing=False)
""" terraform state path """
if not state_path:
state_path = os.path.join(
self.working_dir,
TERRAFORM_PROVISIONER_DEFAULT_STATE_SUBPATH)
if not os.path.isabs(state_path):
if self.root_path:
state_path = os.path.join(self.root_path, state_path)
state_path = os.path.abspath(state_path)
self.vars = self.terraform_config.get([self.terraform_config_base, TERRAFORM_PROVISIONER_CONFIG_VARS_KEY],
exception_if_missing=False)
""" List of vars to pass to terraform. Will be written to a file """
if not self.vars:
self.vars = {}
vars_path = self.terraform_config.get([self.terraform_config_base, TERRAFORM_PROVISIONER_CONFIG_VARS_PATH_KEY],
exception_if_missing=False)
""" vars file containing vars which will be written before running terraform """
if not vars_path:
vars_path = os.path.join(
self.working_dir,
TERRAFORM_PROVISIONER_DEFAULT_VARS_FILE)
if not os.path.isabs(vars_path):
if self.root_path:
vars_path = os.path.join(self.root_path, vars_path)
vars_path = os.path.abspath(vars_path)
logger.info("Creating Terraform client")
self.tf = TerraformClient(
working_dir=os.path.realpath(self.working_dir),
state_path=os.path.realpath(state_path),
vars_path=os.path.realpath(vars_path),
variables=self.vars)
""" TerraformClient instance """
# if the cluster is already provisioned then we can get outputs from it
try:
self._get_outputs_from_tf()
except Exception:
pass
def info(self):
""" get info about a provisioner plugin """
plugin = self
client = plugin.tf
info = {}
info['plugin'] = {
'terraform_config_label': plugin.terraform_config_label,
'terraform_config_base': plugin.terraform_config_base
},
info['client'] = {
'vars': client.vars,
'working_dir': client.working_dir,
'state_path': client.state_path,
'vars_path': client.vars_path,
'terraform_bin': client.terraform_bin
}
fixtures = {}
for fixture in self.get_fixtures().to_list():
fixture_info = {
'fixture': {
'type': fixture.type.value,
'plugin_id': fixture.plugin_id,
'instance_id': fixture.instance_id,
'priority': fixture.priority,
}
}
if hasattr(fixture.plugin, 'info'):
plugin_info = fixture.plugin.info()
if isinstance(plugin_info, dict):
fixture_info.update(plugin_info)
            fixtures[fixture.instance_id] = fixture_info
info['fixtures'] = fixtures
info['helper'] = {
'commands': {
'init': "{bin} -chdir={working_dir} init".format(bin=client.terraform_bin, working_dir=client.working_dir),
'plan': "{bin} -chdir={working_dir} plan -var-file={vars_path} -state={state_path}".format(bin=client.terraform_bin, working_dir=client.working_dir, vars_path=client.vars_path, state_path=client.state_path),
'apply': "{bin} -chdir={working_dir} apply -var-file={vars_path} -state={state_path}".format(bin=client.terraform_bin, working_dir=client.working_dir, vars_path=client.vars_path, state_path=client.state_path),
'destroy': "{bin} -chdir={working_dir} destroy -var-file={vars_path} -state={state_path}".format(bin=client.terraform_bin, working_dir=client.working_dir, vars_path=client.vars_path, state_path=client.state_path),
'output': "{bin} -chdir={working_dir} output -state={state_path}".format(bin=client.terraform_bin, working_dir=client.working_dir, state_path=client.state_path)
}
}
return info
def prepare(self):
""" run terraform init """
logger.info("Running Terraform INIT")
self.tf.init()
def check(self):
""" Check that the terraform plan is valid """
logger.info("Running Terraform PLAN")
self.tf.plan()
def apply(self):
""" Create all terraform resources described in the plan """
logger.info("Running Terraform APPLY")
self.tf.apply()
self._get_outputs_from_tf()
def destroy(self):
""" Remove all terraform resources in state """
logger.info("Running Terraform DESTROY")
self.tf.destroy()
self.fixtures = Fixtures()
def clean(self):
""" Remove terraform run resources from the plan """
logger.info("Running Terraform CLEAN")
dot_terraform = os.path.join(self.working_dir, '.terraform')
if os.path.isdir(dot_terraform):
shutil.rmtree(dot_terraform)
""" Cluster Interaction """
def _get_outputs_from_tf(self) -> Fixtures:
""" retrieve an output from terraform
For other UCTT plugins we can just load configuration, and creating
output plugin instances from various value in config.
We do that here, but we also want to check of any outputs exported by
the terraform root module, which we get using the tf client.
If we find a root module output without a matching config output
defintition then we make some assumptions about plugin type and add it
to the list. We make some simple investigation into output plugin types
and pick either the contrib.common.dict or contrib.common.text plugins.
If we find a root module output that matches an output that was declared
in config then we use that. This allows config to define a plugin_id
which will then be populated automatically. If you know what type of
data you are expecting from a particular tf output then you can prepare
and config for it to do things like setting default values.
Priorities can be used in the config.
Returns:
--------
A Fixtures set of plugins.
"""
# now we ask TF what outputs it knows about and merge those in as
# new output plugins.
# tf.output() produces a dict keyed by output name, where each value is a
# dict of {'sensitive': bool, 'type': [primitive, spec], 'value': Any}
for output_key, output_struct in self.tf.output().items():
# we only know how to create 2 kinds of outputs
output_sensitive = bool(output_struct['sensitive'])
""" Whether or not the output contains sensitive data """
output_type = output_struct['type'][0]
""" String output primitive type (usually string|object|number) """
output_spec = output_struct['type'][1]
""" A structured spec for the type """
output_value = output_struct['value']
""" output value """
# see if we already have an output plugin for this name
fixture = self.fixtures.get_fixture(
type=Type.OUTPUT,
instance_id=output_key,
exception_if_missing=False)
if not fixture:
if output_type == 'object':
fixture = self.environment.add_fixture(
type=Type.OUTPUT,
plugin_id=UCTT_PLUGIN_ID_OUTPUT_DICT,
instance_id=output_key,
priority=self.environment.plugin_priority(delta=5),
arguments={'data': output_value})
else:
fixture = self.environment.add_fixture(
type=Type.OUTPUT,
plugin_id=UCTT_PLUGIN_ID_OUTPUT_TEXT,
instance_id=output_key,
priority=self.environment.plugin_priority(delta=5),
arguments={'text': str(output_value)})
self.fixtures.add_fixture(fixture)
else:
if hasattr(fixture.plugin, 'set_data'):
fixture.plugin.set_data(output_value)
elif hasattr(fixture.plugin, 'set_text'):
fixture.plugin.set_text(str(output_value))
class TerraformClient:
""" Shell client for running terraform using subprocess """
def __init__(self, working_dir: str, state_path: str,
vars_path: str, variables: Dict[str, str]):
"""
Parameters:
-----------
working_dir (str) : string path to the directory where the terraform root
module/plan is, so that subprocess/tf can use that as a pwd
state_path (str) : path to where the terraform state should be kept
vars_path (str) : string path to where the vars file should be written.
variables (Dict[str,str]) : terraform variables dict which will be
written to a vars file.
"""
self.vars = variables
self.working_dir = working_dir
self.state_path = state_path
self.vars_path = vars_path
self.terraform_bin = 'terraform'
pass
def state(self):
""" return the terraform state contents """
try:
with open(os.path.join(self.working_dir, 'terraform.tfstate')) as json_file:
return json.load(json_file)
except FileNotFoundError:
logger.debug("Terraform client found no state file")
return None
def init(self):
""" run terraform init
init only needs to run once for a number of jobs running in parallel,
so we guard it with a lock file. If a lock is already in place, we just
wait for it to be released and then return.
Other terraform actions lock themselves, and we want to fail if such an
operation is locked, but here we just want to skip the duplicate init.
"""
try:
lockfile = os.path.join(
os.path.dirname(
self.state_path),
'.terraform.mtt_mirantis.init.lock')
if os.path.exists(lockfile):
logger.info(
"terraform .init lock file found. Skipping init, but waiting for it to finish")
time_to_wait = 120
time_counter = 0
while os.path.exists(lockfile):
time.sleep(5)
time_counter += 5
if time_counter > time_to_wait:
raise BlockingIOError(
"Timed out when waiting for init lock to go away")
else:
os.makedirs(
os.path.dirname(
os.path.abspath(lockfile)),
exist_ok=True)
with open(lockfile, 'w') as lockfile_object:
lockfile_object.write(
"{} is running init".format(str(os.getpid())))
try:
self._run(['init'], with_vars=False, with_state=False)
finally:
os.remove(lockfile)
except subprocess.CalledProcessError as e:
logger.error(
"Terraform client failed to run init in %s: %s",
self.working_dir,
e.output)
raise Exception("Terraform client failed to run init") from e
def apply(self):
""" Apply a terraform plan """
try:
self._run(['apply', '-auto-approve'], with_state=True,
with_vars=True, return_output=False)
except subprocess.CalledProcessError as e:
logger.error(
"Terraform client failed to run apply in %s: %s",
self.working_dir,
e.stderr)
raise Exception(
"Terraform client failed to run : {}".format(e)) from e
def destroy(self):
""" Apply a terraform plan """
try:
self._run(['destroy', '-auto-approve'], with_state=True,
with_vars=True, return_output=False)
except subprocess.CalledProcessError as e:
logger.error(
"Terraform client failed to run init in %s: %s",
self.working_dir,
e.output)
raise Exception("Terraform client failed to run destroy") from e
def output(self, name: str = ''):
""" Retrieve terraform outputs
Run the terraform output command to retrieve all or one of the outputs.
Outputs are always requested as json, as that is the only reliable way
to machine-parse them.
Returns:
--------
If you provided a name, then a single output is returned, otherwise a
dict of outputs is returned.
"""
args = ['output', '-json']
""" collect subprocess args to pass """
try:
if name:
output = self._run(
args, [name], with_vars=False, return_output=True)
else:
output = self._run(args, with_vars=False, return_output=True)
except subprocess.CalledProcessError as e:
logger.error(
"Terraform client failed to run init in %s: %s",
self.working_dir,
e.output)
raise Exception(
"Terraform client failed to retrieve output") from e
return json.loads(output)
def _make_vars_file(self):
""" write the vars file """
vars_path = self.vars_path
try:
os.makedirs(
os.path.dirname(
os.path.abspath(vars_path)),
exist_ok=True)
with open(vars_path, 'w') as var_file:
json.dump(self.vars, var_file, sort_keys=True, indent=4)
except Exception as e:
raise Exception(
"Could not create terraform vars file: {} : {}".format(
vars_path, e)) from e
def _run(self, args: List[str], append_args: List[str] = [
], with_state=True, with_vars=True, return_output=False):
""" Run terraform """
cmd = [self.terraform_bin]
cmd += args
if with_vars:
self._make_vars_file()
cmd += ['-var-file={}'.format(self.vars_path)]
if with_state:
cmd += ['-state={}'.format(self.state_path)]
cmd += append_args
if return_output:
logger.debug(
"running terraform command with output capture: %s",
" ".join(cmd))
exec = subprocess.run(
cmd,
cwd=self.working_dir,
shell=False,
stdout=subprocess.PIPE)
exec.check_returncode()
return exec.stdout.decode('utf-8')
else:
logger.debug("running terraform command: %s", " ".join(cmd))
exec = subprocess.run(
cmd, cwd=self.working_dir, check=True, text=True)
exec.check_returncode()
|
from django.test import TestCase
from .models import Location,Category,Image
import datetime as dt
# Create your tests here.
class LocationTestClass(TestCase):
def setUp(self):
self.Moringa = Location(location='London')
def test_instance(self):
self.assertTrue(isinstance(self.Moringa,Location))
def tearDown(self):
Location.objects.all().delete()
def test_save_method(self):
self.Moringa.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations)>0)
def test_delete_method(self):
self.Moringa.delete_location('London')
locations = Location.objects.all()
self.assertTrue(len(locations)==0)
class CategoryTestClass(TestCase):
def setUp(self):
self.Food = Category(category='Football')
def test_instance(self):
self.assertTrue(isinstance(self.Food,Category))
def tearDown(self):
Category.objects.all().delete()
def test_save_method(self):
self.Food.save_category()
category = Category.objects.all()
self.assertTrue(len(category)>0)
def test_delete_method(self):
self.Food.delete_category('Football')
category = Category.objects.all()
self.assertTrue(len(category)==0)
class ImageTestClass(TestCase):
def setUp(self):
self.test_category = Category(category='Football')
self.test_category.save_category()
self.location = Location(location="London")
self.location.save_location()
self.image = Image(id=1,title="Football",categories=self.test_category,location=self.location,)
self.image.save_image()
def tearDown(self):
Category.objects.all().delete()
Location.objects.all().delete()
Image.objects.all().delete()
#def test_save_image(self):
#images = Image.objects.all()
#self.assertTrue(len(images)>0) |
from bs4 import BeautifulSoup
import requests, re, os, sys
from urllib.request import urlopen
from glob import glob
import pandas as pd
EN_URL = "https://www.jw.org/en/library/bible/nwt/books/"
AM_URL = "https://www.jw.org/am/ላይብረሪ/መጽሐፍ-ቅዱስ/nwt/መጻሕፍት/"
TI_URL = "https://www.jw.org/ti/ቤተ-መጻሕፍቲ/መጽሓፍ-ቅዱስ/nwt/መጻሕፍቲ/"
def get_books(lang_url):
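"""Return the list of Bible book names scraped from the book select element of the given language page, with multi-word names joined by hyphens so they can be used in URLs."""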
url = requests.get(lang_url)
page = BeautifulSoup(url.text, 'lxml')
books = page.find('select', attrs={'id':'Book'}).text.split('\n')[1:]
for i in range(len(books)):
if(len(books[i].split()) > 1):
hyphen_join = books[i].split()
books[i] = '-'.join(hyphen_join)
return books
en_books = get_books(EN_URL)
am_books = get_books(AM_URL)
ti_books = get_books(TI_URL)
en_books.remove('')
am_books.remove('')
ti_books.remove('')
def write_book_to_file(sub_url, book,lang):
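"""For every book name, create Scrapped/<lang><book> and write one <chapter>.txt file per chapter, containing one verse per line scraped from the chapter page."""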
for i in range(len(book)):
os.makedirs("Scrapped/"+lang+book[i])
address = sub_url + book[i]
print(address)
url = requests.get(address)
page = BeautifulSoup(url.text, 'lxml')
chapters = page.find('div', attrs={'class': 'chapters clearfix'}).text.split('\n')[1:]
chapters.remove('')
## Get Chapters for Each book
for ch in chapters:
url1 = requests.get(sub_url + book[i] +'/' + ch)
print(sub_url + book[i] +'/' + ch)
page1 = BeautifulSoup(url1.text,'lxml')
ch1 = page1.find('div',attrs={'id': "bibleText"})
tt = [verses.text.replace(u'\xa0', u' ').replace('\n',' ') for verses in ch1.find_all('span',attrs={'class':'verse'})]
chapter = open("Scrapped/"+lang+book[i]+"/"+str(ch) + ".txt", 'w')
for item in tt:
chapter.write("{}\n".format(item))
chapter.close()
write_book_to_file(TI_URL, ti_books,"Tigrigna/")
write_book_to_file(AM_URL, am_books,"Amharic/")
write_book_to_file(EN_URL, en_books,"English/")
def merge_books(lang, books):
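"""Concatenate every chapter file of every book for a language into a single Scrapped/<lang>/All.txt file."""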
file_lang = []
for bk in books:
file_lang.append((glob("Scrapped/"+lang+"/" + bk + "/*.txt")))
with open("Scrapped/"+lang+"/All.txt","wb") as write_file:
for f in file_lang:
for i in f:
with open(i,'rb') as r:
write_file.write(r.read())
merge_books("Tigrigna",ti_books)
merge_books("Amharic",am_books)
merge_books("English",en_books)
# Creating a parallel Corpus
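# Each All.txt holds one verse per line, so placing the language columns side by side yields a line-aligned parallel corpus (assuming the verse ordering matches across languages).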
ti = pd.read_csv('Scrapped/Tigrigna/All.txt',delimiter="\n",header=None)
ti.columns = ["Tigrigna"]
en = pd.read_csv('Scrapped/English/All.txt',delimiter="\n",header=None)
en.columns = ["English"]
data = pd.concat([en,ti],axis=1)
print(data.head())
data.to_csv("en_ti.csv",index=False)
am = pd.read_fwf('Scrapped/Amharic/All.txt',delimiter="\n",header=None)
am.columns = ["Amharic"]
#reset 'data' dataframe
data = []
data = pd.concat([en,am],axis=1)
print(data.head())
data.to_csv("en_am.csv",index=False)
|
# Databricks notebook source
# MAGIC %run /group07/shared
# COMMAND ----------
import os
import pyspark.sql.functions as f
# COMMAND ----------
OUTPUT_DIR = "/mnt/group07/final_data_product"
# Load FULL CSV Dataset
df_flickr = spark.read.format("CSV").option("delimiter", "\t").schema(get_csv_data_scheme()).load("/mnt/data/flickr/yfcc100m_dataset-*.bz2")
# COMMAND ----------
df_flickr.printSchema()
# COMMAND ----------
# filter on images only
df_flickr_images_only = df_flickr.where(df_flickr.photo_video_marker == 0)
# COMMAND ----------
# select all relevant fields + repartition across cluster TODO
# sort for even partitions
df_flickr_images_only = df_flickr_images_only.sort("id")
# COMMAND ----------
df_visualization = df_flickr_images_only.select("id", "title", "user_nsid", "photo_video_farm_id", "photo_video_server_id", "photo_video_secret", "photo_video_extension_original")
# COMMAND ----------
# Write visualization input (and ensure sorting)
df_visualization.sort("id").write.parquet(os.path.join(OUTPUT_DIR, "visualization_input.parquet"))
# COMMAND ----------
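# Action: count the image-only rows (this forces evaluation of the transformations above)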
df_visualization.count() |
#!/usr/bin/python -ci=__import__;o=i("os");s=i("sys");a=s.argv;p=o.path;y=p.join(p.dirname(a[1]),".python");o.execv(y,a)
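# The shebang above re-execs this script through the ".python" interpreter located next to the target script (argv[1]), presumably so the project's own virtualenv is used.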
from photons_app.executor import library_setup
from photons_app.special import FoundSerials
from photons_messages import LightMessages
from delfick_project.logging import setup_logging
import logging
async def doit(collector):
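"""Resolve the "lan" target, send a GetColor to every discovered serial, and print each light's hue/saturation/brightness/kelvin."""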
lan_target = collector.resolve_target("lan")
msg = LightMessages.GetColor()
async for pkt in lan_target.send(msg, FoundSerials()):
if pkt | LightMessages.LightState:
hsbk = " ".join(
"{0}={1}".format(key, pkt.payload[key])
for key in ("hue", "saturation", "brightness", "kelvin")
)
print("{0}: {1}".format(pkt.serial, hsbk))
if __name__ == "__main__":
setup_logging(level=logging.ERROR)
collector = library_setup()
collector.run_coro_as_main(doit(collector))
|
# -*- coding: utf-8 -*-
"""DNA Center CustomCaller
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ..restsession import RestSession
from ..utils import (
check_type,
apply_path_params,
extract_and_parse_json,
pprint_request_info,
pprint_response_info,
)
import logging
from requests.exceptions import HTTPError
logger = logging.getLogger(__name__)
class CustomCaller(object):
"""DNA Center CustomCaller.
DNA Center CustomCaller allows API creation.
"""
def __init__(self, session, object_factory):
"""Initialize a new CustomCaller object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(CustomCaller, self).__init__()
self._session = session
self._object_factory = object_factory
if self._session._debug:
logger.setLevel(logging.DEBUG)
logger.propagate = True
else:
logger.addHandler(logging.NullHandler())
logger.propagate = False
def add_api(self, name, obj):
"""Adds an api call to the CustomCaller.
Args:
name (str): name you want to set to the api client, has to follow
python variable naming rule.
obj (object): api call which is actually a calling call_api
method.
"""
setattr(self, name, obj)
def call_api(self, method, resource_path, raise_exception=True,
original_response=False,
**kwargs):
"""Handles the requests and response.
Args:
method(basestring): type of request.
resource_path(basestring): URL in the request object.
raise_exception(bool): If True, http exceptions will be raised.
original_response(bool): If True, MyDict (JSON response) is
returned, else response object.
path_params(dict) (optional): Find each path_params' key in the
resource_path and replace it with path_params' value.
params (optional): Dictionary or bytes to be sent in the query
string for the Request.
data (optional): Dictionary, bytes, or file-like object to send in
the body of the Request.
json (optional): json data to send in the body of the Request.
headers (optional): Dictionary of HTTP Headers to send with the
Request.
cookies (optional): Dict or CookieJar object to send with the
Request.
files (optional): Dictionary of 'name': file-like-objects
(or {'name': ('filename', fileobj)}) for multipart encoding
upload.
auth (optional): Auth tuple to enable Basic/Digest/Custom
HTTP Auth.
timeout(float, tuple) (optional): How long to wait for the server
to send data before giving up, as a float, or a (connect
timeout, read timeout) tuple.
allow_redirects(bool) (optional): bool. Set to True if
POST/PUT/DELETE redirect following is allowed.
proxies(optional): Dictionary mapping protocol to the URL of the
proxy.
verify(bool,string) (optional): if True, the SSL cert will be
verified. A CA_BUNDLE path can also be provided as a string.
stream(optional): if False, the response content will be
immediately downloaded.
cert(basestring, tuple) (optional): if String, path to ssl client
cert file (.pem). If Tuple, (‘cert’, ‘key’) pair
Returns:
MyDict or object: If original_response is True returns the
original object response, else returns a JSON response with
access to the object's properties by using the dot notation
or the bracket notation. Defaults to False.
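Example (illustrative sketch; the endpoint path and the `dnac`
client object are assumptions, not part of this module):
>>> devices = dnac.custom_caller.call_api(
... 'GET', '/dna/intent/api/v1/network-device')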
Raises:
TypeError: If the parameter types are incorrect.
HTTPError: If the DNA Center cloud returns an error.
"""
path_params = kwargs.pop('path_params', {})
resource_path = apply_path_params(resource_path, path_params)
# Ensure the url is an absolute URL
abs_url = self._session.abs_url(resource_path)
headers = self._session.headers
if 'headers' in kwargs:
headers.update(kwargs.pop('headers'))
verify = kwargs.pop("verify", self._session.verify)
logger.debug(pprint_request_info(abs_url, method,
headers,
**kwargs))
response = self._session._req_session.request(method,
abs_url,
headers=headers,
verify=verify,
**kwargs)
if raise_exception:
try:
response.raise_for_status()
except HTTPError as e:
logger.debug(pprint_response_info(e.response))
raise e
logger.debug(pprint_response_info(response))
if original_response:
return response
else:
stream = kwargs.get('stream', None)
json_data = extract_and_parse_json(response, ignore=stream)
return self._object_factory('bpm_custom', json_data)
|
"""Training"""
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
from tcl import tcl
from subfunc.showdata import *
FLAGS = tf.app.flags.FLAGS
# =============================================================
# =============================================================
def train(data,
label,
num_class,
list_hidden_nodes,
initial_learning_rate,
momentum,
max_steps,
decay_steps,
decay_factor,
batch_size,
train_dir,
moving_average_decay = 0.9999,
summary_steps = 500,
checkpoint_steps = 10000,
MLP_trainable = True,
save_file = 'model.ckpt',
load_file = None,
random_seed = None):
"""Build and train a model
Args:
data: data. 2D ndarray [num_comp, num_data]
label: labels. 1D ndarray [num_data]
num_class: number of classes
list_hidden_nodes: number of nodes for each layer. 1D array [num_layer]
initial_learning_rate: initial learning rate
momentum: momentum parameter (tf.train.MomentumOptimizer)
max_steps: number of iterations (mini-batches)
decay_steps: decay steps (tf.train.exponential_decay)
decay_factor: decay factor (tf.train.exponential_decay)
batch_size: mini-batch size
train_dir: save directory
moving_average_decay: (option) moving average decay of variables to be saved (tf.train.ExponentialMovingAverage)
summary_steps: (option) interval to save summary
checkpoint_steps: (option) interval to save checkpoint
MLP_trainable: (option) If false, fix MLP layers
save_file: (option) name of model file to save
load_file: (option) name of model file to load
random_seed: (option) random seed
Returns:
"""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Set random_seed
if random_seed is not None:
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
global_step = tf.Variable(0, trainable=False)
# Data holder
data_holder = tf.placeholder(tf.float32, shape=[None, data.shape[0]], name='data')
label_holder = tf.placeholder(tf.int32, shape=[None], name='label')
# Build a Graph that computes the logits predictions from the
# inference model.
logits, feats = tcl.inference(data_holder, list_hidden_nodes, num_class, MLP_trainable=MLP_trainable)
# Calculate loss.
loss, accuracy = tcl.loss(logits, label_holder)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op, lr = tcl.train(loss,
accuracy,
global_step=global_step,
initial_learning_rate=initial_learning_rate,
momentum=momentum,
decay_steps=decay_steps,
decay_factor=decay_factor,
moving_average_decay=moving_average_decay)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
sess = tf.Session(config=tf.ConfigProto(
log_device_placement=False))
sess.run(init)
# Restore trained parameters from "load_file"
if load_file is not None:
print("Load trainable parameters from {0:s}...".format(load_file))
reader = tf.train.NewCheckpointReader(load_file)
reader_var_to_shape_map = reader.get_variable_to_shape_map()
#
load_vars = tf.get_collection(FLAGS.FILTER_COLLECTION)
# list up vars contained in the file
initialized_vars = []
for lv in load_vars:
if lv.name.split(':')[0] in reader_var_to_shape_map:
print(" {0:s}".format(lv.name))
initialized_vars.append(lv)
# Restore
saver_init = tf.train.Saver(initialized_vars)
saver_init.restore(sess, load_file)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
num_data = data.shape[1]
num_steps_in_epoch = int(np.floor(num_data / batch_size))
for step in range(max_steps):
start_time = time.time()
# Make shuffled batch -----------------------------
if step % num_steps_in_epoch == 0:
step_in_epoch = 0
shuffle_idx = np.random.permutation(num_data)
x_batch = data[:, shuffle_idx[batch_size*step_in_epoch:batch_size*(step_in_epoch+1)]].T
y_batch = label[shuffle_idx[batch_size*step_in_epoch:batch_size*(step_in_epoch+1)]]
step_in_epoch = step_in_epoch + 1
# Run ---------------------------------------------
feed_dict = {data_holder:x_batch, label_holder:y_batch}
_, loss_value, accuracy_value, lr_value = sess.run([train_op, loss, accuracy, lr], feed_dict=feed_dict)
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 100 == 0:
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, lr = %f, loss = %.2f, accuracy = %3.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, lr_value, loss_value, accuracy_value*100,
examples_per_sec, sec_per_batch))
if step % summary_steps == 0:
summary_str = sess.run(summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % checkpoint_steps == 0:
checkpoint_path = os.path.join(train_dir, save_file)
saver.save(sess, checkpoint_path, global_step=step)
# Save trained model ----------------------------------
save_path = os.path.join(train_dir, save_file)
print("Save model in file: {0:s}".format(save_path))
saver.save(sess, save_path)
|
"""A class to aid in generating random numbers and sequences
It doesn't seem necessary to create an options class since this class will probably not be extended
"""
import random, sys
nts = ['A','C','G','T']
hexchars = '0123456789abcdef'
uuid4special = '89ab'
class RandomSource:
"""You can asign it a seed if you want
:param seed: seed the pseduorandom number generator
:type seed: int
"""
def __init__(self,seed=None):
self._random = random.Random()
if seed: self._random.seed(seed)
def choice(self,arr):
"""Uniform random selection of a member of an list
:param arr: list you want to select an element from
:type arr: list
:return: one element from the list
"""
ind = self.randint(0,len(arr)-1)
return arr[ind]
def random(self):
"""generate a random number
:return: uniform random float between 0 and 1
:rtype: float
"""
return self._random.random()
def gauss(self,mu,sigma):
"""Generate a random number based on a gaussian distribution
:param mu: mean of distribution
:param sigma: standard deviation of the distribution
:type mu: float
:type sigma: float
"""
return self._random.gauss(mu,sigma)
def randint(self,a,b):
"""Generate a random integer uniform distribution between a and b like randint of the usual random class
:return: random int between a and b
:rtype: int
"""
return self._random.randint(a,b)
def different_random_nt(self,nt):
"""Generate a random nucleotide change (uniform random); will never return the input nucleotide.
:param nt: current nucleotide
:type nt: char
:return: new nucleotide
:rtype: char
"""
global nts
return self._random.choice([x for x in nts if x != nt.upper()])
def random_nt(self):
"""Produce a random nucleotide (uniform random)
:return: nucleotide
:rtype: char
"""
global nts
return self._random.choice(nts)
def get_weighted_random_index(self,weights):
"""Return an index of an array based on the weights
if a random number between 0 and 1 is less than an index return the lowest index
:param weights: a list of floats for how to weight each index [w1, w2, ... wN]
:type weights: list
:return: index
:rtype: int
"""
tot = float(sum([float(x) for x in weights]))
fracarray = [weights[0]]
for w in weights[1:]:
prev = fracarray[-1]
fracarray.append(w+prev)
#print fracarray
rnum = self._random.random()*tot
#print rnum
#sys.exit()
for i in range(len(weights)):
if rnum < fracarray[i]: return i
sys.stderr.write("Warning unexpected no random\n")
def uuid4(self):
"""Make an id in the format of UUID4, but keep in mind this could very well be pseudorandom, and if it is you'll not be truely random, and can regenerate same id if same seed"""
return ''.join([hexchars[self.randint(0,15)] for x in range(0,8)]) + '-' +\
''.join([hexchars[self.randint(0,15)] for x in range(0,4)]) + '-' +\
'4'+''.join([hexchars[self.randint(0,15)] for x in range(0,3)]) + '-' +\
uuid4special[self.randint(0,3)]+''.join([hexchars[self.randint(0,15)] for x in range(0,3)]) + '-' +\
''.join([hexchars[self.randint(0,15)] for x in range(0,12)])
|
#! /usr/bin/env python3
# looking for 3 resistor values from one E-series which satisfy the ratios R : 3R : 5R
# tolerance = 0.001 # 0.1%
# tolerance = 0.01 # 1%
#tolerance = 0.005 # 0.5%
tolerance = 0.0025
series_list = ["e6.txt", "e12.txt", "e24.txt", "e48.txt", "e96.txt", "e192.txt"]
series_names = ["E6", "E12", "E24", "E48", "E96", "E192"]
valid_combinations = []
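# Brute-force search: for each candidate R1 in the series, look for series members within the chosen tolerance of 3*R1 and 5*R1.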
for n, series in enumerate(series_list):
print("Examining series: {}".format(series_names[n]))
with open(series, "r") as f:
values = []
for line in f:
values.append(int(line))
# go through values
for r1 in values:
r3 = 3 * r1
r5 = 5 * r1
for s in values:
if r3 * (1 - tolerance) <= s <= r3 * (1 + tolerance):
for t in values:
if r5 * (1 - tolerance) <= t <= r5 * (1 + tolerance):
# success
valid_combinations.append((r1, s, t))
print("\n*** Values found! ***")
print("Series: {}".format(series_names[n]))
print("R1: {}".format(r1))
print("R3: {}".format(r3))
print("R5: {}\n".format(r5))
# TODO: error calculation
# TODO: determine optimum combination?
print("No R5 for combination of R1={} and R3={}".format(r1, r3))
print("No R3 for R1={}".format(r1))
if valid_combinations == []:
print("\nNo valid combinations found :(\n")
else:
print("\nValid combinations found!\n")
best_combination = valid_combinations[0]
best_error = 100
for c in valid_combinations:
error1 = abs(c[1] - 3*c[0]) / (3*c[0]) * 100
error2 = abs(c[2] - 5*c[0]) / (5*c[0]) * 100
error = max(error1, error2)
if error < best_error:
best_error = error
best_combination = c
print("{} {} {} error: {:.2f}%".format(c[0], c[1], c[2], error))
print("\nBest combination: {} {} {}".format(best_combination[0],
best_combination[1],
best_combination[2]))
print("Percent error: {:.2f}%".format(best_error))
r1 = best_combination[0]
r3 = best_combination[1]
r5 = best_combination[2]
# ideal_divider = [r1, r1, 3*r1, 5*r1, 10*r1, 30*r1, 50*r1, 100*r1, 300*r1, 500*r1]
divider = [2*r1, r3, r5, 10*r1, 10*r3, 10*r5, 100*r1, 100*r3, 100*r5, 1000*r1, 1000*r3, 1000*r5]
voltages = []
for i in range(len(divider)):
v = 10 * sum(divider[:i+1]) / sum(divider)
if v < 1:
voltages.append(str(round(v * 1000, 3)) + " mV")
else:
voltages.append(str(round(v, 3)) + " V")
print(voltages)
print("Total resistance: {}".format(sum(divider)))
|
import numpy as np
from gym import spaces
import gym
class HERGoalEnvWrapper(gym.Env):
def __init__(self, env, activate_sparse_reward=False):
"""
:param env: (causal_world.CausalWorld) the environment to convert.
:param activate_sparse_reward: (bool) True to activate sparse rewards.
"""
super(HERGoalEnvWrapper, self).__init__()
self.env = env
self.metadata = self.env.metadata
self.action_space = env.action_space
current_goal = self.env.get_task().get_achieved_goal().flatten()
goal_space_shape = current_goal.shape
self.action_space = self.env.action_space
if activate_sparse_reward:
self.env.get_task().activate_sparse_reward()
self.observation_space = spaces.Dict(
dict(desired_goal=spaces.Box(-np.inf,
np.inf,
shape=goal_space_shape,
dtype=np.float64),
achieved_goal=spaces.Box(-np.inf,
np.inf,
shape=goal_space_shape,
dtype=np.float64),
observation=self.env.observation_space))
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
self.env.add_wrapper_info({
'her_environment': {
'activate_sparse_reward': activate_sparse_reward
}
})
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name))
return getattr(self.env, name)
@property
def spec(self):
"""
:return:
"""
return self.env.spec
@classmethod
def class_name(cls):
"""
:return:
"""
return cls.__name__
def step(self, action):
"""
Used to step through the environment.
:param action: (nd.array) specifies which action should be taken by
the robot, should follow the same action
mode specified.
:return: (nd.array) specifies the observations returned after stepping
through the environment. Again, it follows the
observation_mode specified.
"""
obs_dict = dict()
normal_obs, reward, done, info = self.env.step(action)
obs_dict['observation'] = normal_obs
obs_dict['achieved_goal'] = info['achieved_goal'].flatten()
obs_dict['desired_goal'] = info['desired_goal'].flatten()
return obs_dict, reward, done, info
def reset(self):
"""
Resets the environment to the current starting state of the environment.
:return: (nd.array) specifies the observations returned after resetting
the environment. Again, it follows the
observation_mode specified.
"""
obs_dict = dict()
normal_obs = self.env.reset()
obs_dict['observation'] = normal_obs
obs_dict['achieved_goal'] = self.env.get_task().get_achieved_goal(
).flatten()
obs_dict['desired_goal'] = self.env.get_task().get_desired_goal(
).flatten()
return obs_dict
def render(self, mode='human', **kwargs):
"""
Returns an RGB image taken from above the platform.
:param mode: (str) not taken in account now.
:return: (nd.array) an RGB image taken from above the platform.
"""
return self.env.render(mode, **kwargs)
def close(self):
"""
closes the environment in a safe manner should be called at the
end of the program.
:return: None
"""
return self.env.close()
def seed(self, seed=None):
"""
Used to set the seed of the environment,
to reproduce the same randomness.
:param seed: (int) specifies the seed number
:return: (int in list) the numpy seed that you can use further.
"""
return self.env.seed(seed)
def compute_reward(self, achieved_goal, desired_goal, info):
"""
Used to calculate the reward given a hypothetical situation that could
be used in hindsight experience replay algorithms variants.
Can only be used in the sparse reward setting; for other settings
it can be tricky to apply here.
:param achieved_goal: (nd.array) specifies the achieved goal as bounding boxes of
objects by default.
:param desired_goal: (nd.array) specifies the desired goal as bounding boxes of
goal shapes by default.
:param info: (dict) not used for now.
:return: (float) the final reward achieved given the hypothetical
situation.
"""
return self.env.get_task().compute_reward(achieved_goal, desired_goal,
info)
def __str__(self):
"""
:return:
"""
return '<{}{}>'.format(type(self).__name__, self.env)
def __repr__(self):
"""
:return:
"""
return str(self)
@property
def unwrapped(self):
"""
:return:
"""
return self.env.unwrapped
|
RESULTS_LOGGER = 'results' |
import json
import os
from github3 import login
from trains import Task
def clone_and_queue():
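"""Parse the GitHub event payload for a "/train-model <task_id> <queue_name>" comment, clone the referenced Trains task, enqueue the clone, and comment the new task id back on the issue."""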
payload_fname = os.getenv('GITHUB_EVENT_PATH')
with open(payload_fname, 'r') as f:
payload = json.load(f)
task_id, _, queue_name = payload.get("comment", {}).get("body", "").partition(" ")[2].partition(" ") # the body should be in the form of /train-model <task_id> <queue_name>
if task_id:
enqueue_task = Task.get_task(task_id=task_id)
# Clone the task to pipe to. This creates a task with status Draft whose parameters can be modified.
gh_issue_number = payload.get("issue", {}).get("number")
cloned_task = Task.clone(
source_task=enqueue_task,
name=f"{task_id} cloned task for github issue {gh_issue_number}"
)
Task.enqueue(cloned_task.id, queue_name=queue_name)
owner, repo = payload.get("repository", {}).get("full_name", "").split("/")
if owner and repo:
gh = login(token=os.getenv("GITHUB_TOKEN"))
if gh:
issue = gh.issue(owner, repo, payload.get("issue", {}).get("number"))
if issue:
issue.create_comment(f"New task, id:{cloned_task.id} is in queue {queue_name}")
else:
print(f'can not comment issue, {payload.get("issue", {}).get("number")}')
else:
print(f"can not log in to gh, {os.getenv('GITHUB_TOKEN')}")
if __name__ == "__main__":
clone_and_queue()
|
import numpy as np
import math
from ..miniworld import MiniWorldEnv, Room
from ..entity import Box
from ..params import DEFAULT_PARAMS
from gym import spaces
class OneRoom(MiniWorldEnv):
"""
Environment in which the goal is to go to a red box
placed randomly in one big room.
"""
def __init__(self, size=10, max_episode_steps=1000, **kwargs):
assert size >= 2
self.size = size
super().__init__(
max_episode_steps=max_episode_steps,
**kwargs
)
# Allow only movement actions (left/right/forward)
self.action_space = spaces.Discrete(self.actions.move_forward+1)
def _gen_world(self):
room = self.add_rect_room(
min_x=0,
max_x=self.size,
min_z=0,
max_z=self.size
)
self.box = self.place_entity(Box(color='red'))
self.place_agent()
def step(self, action):
obs, reward, done, info = super().step(action)
#if self.near(self.box):
reward += self._reward()
# done = True
return obs, reward, done, info
class OneRoomS6(OneRoom):
def __init__(self, max_episode_steps=100, **kwargs):
super().__init__(size=6, max_episode_steps=max_episode_steps, **kwargs)
class OneRoomS6Fast(OneRoomS6):
def __init__(self, forward_step=0.7, turn_step=45):
# Parameters for larger movement steps, fast stepping
params = DEFAULT_PARAMS.no_random()
params.set('forward_step', forward_step)
params.set('turn_step', turn_step)
super().__init__(
max_episode_steps=50,
params=params,
domain_rand=False
)
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
Defines a widget for getting a scaled value client-side.
"""
from ipywidgets import Widget, register, widget_serialization
from traitlets import Unicode, Instance, Union, Any, Undefined
from ._frontend import module_name, module_version
from .scale import Scale
@register
class ScaledValue(Widget):
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_model_name = Unicode("ScaledValueModel").tag(sync=True)
scale = Instance(Scale).tag(sync=True, **widget_serialization)
input = Union(
[Instance("ipyscales.ScaledValue"), Any()],
allow_none=True,
help="The input to be scaled. If set to another ScaledValue, it will use its output as the input.",
).tag(sync=True, **widget_serialization)
output = Any(
None,
allow_none=True,
read_only=True,
help="Placeholder trait for linking with ipywidgets.jslink(). Not synced.",
).tag(
sync=True
) # Not actually synced, even if sync=True
|
from .util import EasyDict, make_cache_dir_path
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
""" diagnostic_updater for Python.
@author Brice Rebsamen <brice [dot] rebsamen [gmail]>
"""
import threading
import rospy
from ._diagnostic_updater import *
class FrequencyStatusParam:
"""A structure that holds the constructor parameters for the FrequencyStatus
class.
Implementation note: the min_freq and max_freq parameters in the C++ code
are stored as pointers, so that if they are updated, the new values are used.
To emulate this behavior, we here use a dictionary to hold them: {'min','max'}
freq_bound is a dictionary with keys 'min' and 'max', containing the min
and max acceptable frequencies.
tolerance is the tolerance with which bounds must be satisfied. Acceptable
values are from freq_bound['min'] * (1 - tolerance) to
freq_bound['max'] * (1 + tolerance). Common use cases are to set
tolerance to zero, or to assign the same value to freq_bound['min'] and
freq_bound['max']
window_size is the number of events to consider in the statistics.
"""
def __init__(self, freq_bound, tolerance = 0.1, window_size = 5):
"""Creates a filled-out FrequencyStatusParam."""
self.freq_bound = freq_bound
self.tolerance = tolerance
self.window_size = window_size
class FrequencyStatus(DiagnosticTask):
"""A diagnostic task that monitors the frequency of an event.
This diagnostic task monitors the frequency of calls to its tick method,
and creates corresponding diagnostics. It will report a warning if the
frequency is outside acceptable bounds, and report an error if there have
been no events in the latest window.
"""
def __init__(self, params, name = "FrequencyStatus"):
"""Constructs a FrequencyStatus class with the given parameters."""
DiagnosticTask.__init__(self, name)
self.params = params
self.lock = threading.Lock()
self.clear()
def clear(self):
"""Resets the statistics."""
with self.lock:
self.count = 0
curtime = rospy.Time.now()
self.times = [curtime for i in range(self.params.window_size)]
self.seq_nums = [0 for i in range(self.params.window_size)]
self.hist_indx = 0
def tick(self):
"""Signals that an event has occurred."""
with self.lock:
self.count += 1
def run(self, stat):
with self.lock:
curtime = rospy.Time.now()
curseq = self.count
events = curseq - self.seq_nums[self.hist_indx]
window = (curtime - self.times[self.hist_indx]).to_sec()
freq = events / window
self.seq_nums[self.hist_indx] = curseq
self.times[self.hist_indx] = curtime
self.hist_indx = (self.hist_indx + 1) % self.params.window_size
if events == 0:
stat.summary(2, "No events recorded.")
elif freq < self.params.freq_bound['min'] * (1 - self.params.tolerance):
stat.summary(1, "Frequency too low.")
elif 'max' in self.params.freq_bound and freq > self.params.freq_bound['max'] * (1 + self.params.tolerance):
stat.summary(1, "Frequency too high.")
else:
stat.summary(0, "Desired frequency met")
stat.add("Events in window", "%d" % events)
stat.add("Events since startup", "%d" % self.count)
stat.add("Duration of window (s)", "%f" % window)
stat.add("Actual frequency (Hz)", "%f" % freq)
if 'max' in self.params.freq_bound and self.params.freq_bound['min'] == self.params.freq_bound['max']:
stat.add("Target frequency (Hz)", "%f" % self.params.freq_bound['min'])
if self.params.freq_bound['min'] > 0:
stat.add("Minimum acceptable frequency (Hz)", "%f" % (self.params.freq_bound['min'] * (1 - self.params.tolerance)))
if 'max' in self.params.freq_bound:
stat.add("Maximum acceptable frequency (Hz)", "%f" % (self.params.freq_bound['max'] * (1 + self.params.tolerance)))
return stat
class TimeStampStatusParam:
"""A structure that holds the constructor parameters for the TimeStampStatus class.
max_acceptable: maximum acceptable difference between two timestamps.
min_acceptable: minimum acceptable difference between two timestamps.
"""
def __init__(self, min_acceptable = -1, max_acceptable = 5):
"""Creates a filled-out TimeStampStatusParam."""
self.max_acceptable = max_acceptable
self.min_acceptable = min_acceptable
class TimeStampStatus(DiagnosticTask):
"""Diagnostic task to monitor the interval between events.
This diagnostic task monitors the difference between consecutive events,
and creates corresponding diagnostics. An error occurs if the interval
between consecutive events is too large or too small. An error condition
will only be reported during a single diagnostic report unless it
persists. Tallies of errors are also maintained to keep track of errors
in a more persistent way.
"""
def __init__(self, params = TimeStampStatusParam(), name = "Timestamp Status"):
"""Constructs the TimeStampStatus with the given parameters."""
DiagnosticTask.__init__(self, name)
self.params = params
self.lock = threading.Lock()
self.early_count = 0
self.late_count = 0
self.zero_count = 0
self.zero_seen = False
self.max_delta = 0
self.min_delta = 0
self.deltas_valid = False
def tick(self, stamp):
"""Signals an event.
@param stamp The timestamp of the event that will be used in computing
intervals. Can be either a double or a ros::Time.
"""
if not isinstance(stamp, float):
stamp = stamp.to_sec()
with self.lock:
if stamp == 0:
self.zero_seen = True
else:
delta = rospy.Time.now().to_sec() - stamp
if not self.deltas_valid or delta > self.max_delta:
self.max_delta = delta
if not self.deltas_valid or delta < self.min_delta:
self.min_delta = delta
self.deltas_valid = True
def run(self, stat):
with self.lock:
stat.summary(0, "Timestamps are reasonable.")
if not self.deltas_valid:
stat.summary(1, "No data since last update.")
else:
if self.min_delta < self.params.min_acceptable:
stat.summary(2, "Timestamps too far in future seen.")
self.early_count += 1
if self.max_delta > self.params.max_acceptable:
stat.summary(2, "Timestamps too far in past seen.")
self.late_count += 1
if self.zero_seen:
stat.summary(2, "Zero timestamp seen.")
self.zero_count += 1
stat.add("Earliest timestamp delay:", "%f" % self.min_delta)
stat.add("Latest timestamp delay:", "%f" % self.max_delta)
stat.add("Earliest acceptable timestamp delay:", "%f" % self.params.min_acceptable)
stat.add("Latest acceptable timestamp delay:", "%f" % self.params.max_acceptable)
stat.add("Late diagnostic update count:", "%i" % self.late_count)
stat.add("Early diagnostic update count:", "%i" % self.early_count)
stat.add("Zero seen diagnostic update count:", "%i" % self.zero_count)
self.deltas_valid = False
self.min_delta = 0
self.max_delta = 0
self.zero_seen = False
return stat
class Heartbeat(DiagnosticTask):
"""Diagnostic task to monitor whether a node is alive
This diagnostic task always reports as OK and 'Alive' when it runs
"""
def __init__(self):
"""Constructs a HeartBeat"""
DiagnosticTask.__init__(self, "Heartbeat")
def run(self, stat):
stat.summary(0, "Alive")
return stat
|
import frappe
def get_context(context):
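"""Load the CompanyEmployeeDetail document named by the request's docname and expose it as context.employee for the web page template."""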
x = frappe.form_dict.docname;
context.employee = frappe.get_doc("CompanyEmployeeDetail",x)
return context
|
import unittest
import os
from surround import Surround, Stage, SurroundData, Config
from .stages.first_stage import FirstStage
class HelloStage(Stage):
def operate(self, surround_data, config):
surround_data.text = "hello"
if "helloStage" in config:
surround_data.config_value = config["helloStage"]["suffix"]
class BasicData(SurroundData):
text = None
config_value = None
stage1 = None
stage2 = None
class TestSurround(unittest.TestCase):
def test_happy_path(self):
surround = Surround([HelloStage()])
data = BasicData()
surround.process(data)
self.assertEqual(data.text, "hello")
def test_rejecting_attributes(self):
surround = Surround([HelloStage()])
data = BasicData()
surround.process(data)
self.assertRaises(AttributeError, getattr, data, "no_text")
def test_surround_config(self):
path = os.path.dirname(__file__)
config = Config()
config.read_config_files([os.path.join(path, "config.yaml")])
surround = Surround([HelloStage()])
surround.set_config(config)
data = BasicData()
surround.process(data)
self.assertEqual(data.config_value, "Scott")
def test_surround_override(self):
path = os.path.dirname(__file__)
surround = Surround([FirstStage()])
config = Config()
config.read_config_files([os.path.join(path, "stages.yaml")])
surround.set_config(config)
data = BasicData()
surround.process(data)
self.assertEqual(data.stage1, "first stage")
self.assertEqual(data.stage2, "second stage")
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Identity service."""
from oslo_config import cfg
from oslo_log import log
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone import exception
from keystone.i18n import _, _LW
from keystone import notifications
from keystone.identity import schema
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
class User(controller.Controller):
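"""Controller for the Identity v3 users collection: create, list, get, update and delete users, change passwords, and manage user/group membership."""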
collection_name = 'users'
member_name = 'user'
def __init__(self):
super(User, self).__init__()
self.get_member_from_driver = self.identity_api.get_user
@controller.protected()
@validation.validated(schema.user_create, 'user')
def create_user(self, context, user):
self._require_attribute(user, 'name')
# The manager layer will generate the unique ID for users
ref = self._normalize_dict(user)
ref = self._normalize_domain_id(context, ref)
initiator = notifications._get_request_audit_info(context)
ref = self.identity_api.create_user(ref, initiator)
return User.wrap_member(context, ref)
@controller.filterprotected(None, 'domain_id', 'enabled', 'name')
def list_users(self, context, filters):
hints = User.build_driver_hints(context, filters)
refs = self.identity_api.list_users(
domain_scope=self._get_domain_id_for_list_request(context),
hints=hints)
return User.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_user(self, context, user_id):
ref = self.identity_api.get_user(user_id)
return User.wrap_member(context, ref)
@controller.protected()
@validation.validated(schema.user_update, 'user')
def update_user(self, context, user_id, user):
self._require_matching_id(user_id, user)
self._require_matching_domain_id(
user_id, user, self.identity_api.get_user)
initiator = notifications._get_request_audit_info(context)
ref = self.identity_api.update_user(user_id, user, initiator)
return User.wrap_member(context, ref)
@controller.protected()
def delete_user(self, context, user_id):
initiator = notifications._get_request_audit_info(context)
return self.identity_api.delete_user(user_id, initiator)
@controller.protected()
def change_password(self, context, user_id, user):
original_password = user.get('original_password')
if original_password is None:
raise exception.ValidationError(target='user',
attribute='original_password')
password = user.get('password')
if password is None:
raise exception.ValidationError(target='user',
attribute='password')
try:
self.identity_api.change_password(
context, user_id, original_password, password)
except AssertionError:
raise exception.Unauthorized()
def _check_user_and_group_protection(self, context, prep_info,
user_id, group_id):
ref = {}
ref['user'] = self.identity_api.get_user(user_id)
ref['group'] = self.identity_api.get_group(group_id)
self.check_protection(context, prep_info, ref)
@controller.filterprotected(None, 'domain_id', 'enabled', 'name')
def list_users_in_group(self, context, filters, group_id):
hints = User.build_driver_hints(context, filters)
refs = self.identity_api.list_users_in_group(group_id, hints=hints)
return User.wrap_collection(context, refs, hints=hints)
@controller.protected(callback=_check_user_and_group_protection)
def add_user_to_group(self, context, user_id, group_id):
self.identity_api.add_user_to_group(user_id, group_id)
@controller.protected(callback=_check_user_and_group_protection)
def check_user_in_group(self, context, user_id, group_id):
return self.identity_api.check_user_in_group(user_id, group_id)
@controller.protected(callback=_check_user_and_group_protection)
def remove_user_from_group(self, context, user_id, group_id):
self.identity_api.remove_user_from_group(user_id, group_id)
@dependency.requires('identity_api')
class Group(controller.Controller):
collection_name = 'groups'
member_name = 'group'
def __init__(self):
super(Group, self).__init__()
self.get_member_from_driver = self.identity_api.get_group
@controller.protected()
def create_group(self, context, group):
self._require_attribute(group, 'name')
# The manager layer will generate the unique ID for groups
ref = self._normalize_dict(group)
ref = self._normalize_domain_id(context, ref)
initiator = notifications._get_request_audit_info(context)
ref = self.identity_api.create_group(ref, initiator)
return Group.wrap_member(context, ref)
@controller.filterprotected(None, 'domain_id', 'name')
def list_groups(self, context, filters):
hints = Group.build_driver_hints(context, filters)
refs = self.identity_api.list_groups(
domain_scope=self._get_domain_id_for_list_request(context),
hints=hints)
return Group.wrap_collection(context, refs, hints=hints)
@controller.filterprotected(None, 'name')
def list_groups_for_user(self, context, filters, user_id):
hints = Group.build_driver_hints(context, filters)
refs = self.identity_api.list_groups_for_user(user_id, hints=hints)
return Group.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_group(self, context, group_id):
ref = self.identity_api.get_group(group_id)
return Group.wrap_member(context, ref)
@controller.protected()
def update_group(self, context, group_id, group):
self._require_matching_id(group_id, group)
self._require_matching_domain_id(
group_id, group, self.identity_api.get_group)
initiator = notifications._get_request_audit_info(context)
ref = self.identity_api.update_group(group_id, group, initiator)
return Group.wrap_member(context, ref)
@controller.protected()
def delete_group(self, context, group_id):
initiator = notifications._get_request_audit_info(context)
self.identity_api.delete_group(group_id, initiator)
|
#!/usr/bin/env python
import sys
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.nlt as nlt
def main():
"""
NAME
trmaq_magic.py
DESCRIPTION
does non-linear TRM acquisition correction
SYNTAX
trmaq_magic.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-f MFILE, sets magic_measurements input file
-ft TSPEC, sets thellier_specimens input file
-F OUT, sets output for non-linear TRM acquisition corrected data
-sav save figures and quit
-fmt [png, svg, pdf]
-DM [2, 3] MagIC data model, default 3
DEFAULTS
MFILE: trmaq_measurements.txt
TSPEC: thellier_specimens.txt
OUT: NLT_specimens.txt
"""
meas_file = 'trmaq_measurements.txt'
tspec = "thellier_specimens.txt"
output = 'NLT_specimens.txt'
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
meas_file = input(
"Input magic_measurements file name? [trmaq_measurements.txt] ")
if meas_file == "":
meas_file = "trmaq_measurements.txt"
tspec = input(
" thellier_specimens file name? [thellier_specimens.txt] ")
if tspec == "":
tspec = "thellier_specimens.txt"
output = input(
"File for non-linear TRM adjusted specimen data: [NLTspecimens.txt] ")
if output == "":
output = "NLT_specimens.txt"
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-ft' in sys.argv:
ind = sys.argv.index('-ft')
tspec = sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index('-F')
output = sys.argv[ind+1]
if '-sav' in sys.argv:
save_plots = True
else:
save_plots = False
fmt = pmag.get_named_arg("-fmt", "svg")
#
PLT = {'aq': 1}
if not save_plots:
pmagplotlib.plot_init(PLT['aq'], 5, 5)
#
# get name of file from command line
#
comment = ""
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if 'measurements' not in file_type:
print(file_type, "This is not a valid measurements file ")
sys.exit()
if data_model_num == 2:
spec_col = "er_specimen_name"
lab_field_dc_col = "specimen_lab_field_dc"
int_col = "specimen_int"
meth_col = "magic_method_codes"
treat_dc_col = "treatment_dc_field"
magn_moment_col = "measurement_magn_moment"
experiment_col = "magic_experiment_name"
outfile_type = "pmag_specimens"
else:
spec_col = "specimen"
lab_field_dc_col = "int_treat_dc_field"
int_col = "int_abs"
meth_col = "method_codes"
treat_dc_col = "treat_dc_field"
magn_moment_col = "magn_moment"
experiment_col = "experiment"
outfile_type = "specimens"
sids = pmag.get_specs(meas_data)
specimen = 0
#
# read in thellier_specimen data
#
nrm, file_type = pmag.magic_read(tspec)
PmagSpecRecs= []
while specimen < len(sids):
#
# find corresponding paleointensity data for this specimen
#
s = sids[specimen]
blab, best = "", ""
for nrec in nrm: # pick out the Banc data for this spec
if nrec[spec_col] == s:
try:
blab = float(nrec[lab_field_dc_col])
except ValueError:
continue
best = float(nrec[int_col])
TrmRec = nrec
break
if blab == "":
print("skipping ", s, " : no best ")
specimen += 1
else:
print(sids[specimen], specimen+1, 'of ',
len(sids), 'Best = ', best*1e6)
MeasRecs = []
#
# find the data from the meas_data file for this specimen
#
for rec in meas_data:
if rec[spec_col] == s:
meths = rec[meth_col].split(":")
methcodes = []
for meth in meths:
methcodes.append(meth.strip())
if "LP-TRM" in methcodes:
MeasRecs.append(rec)
if len(MeasRecs) < 2:
specimen += 1
print('skipping specimen - no trm acquisition data ', s)
#
# collect info for the PmagSpecRec dictionary
#
else:
TRMs, Bs = [], []
for rec in MeasRecs:
Bs.append(float(rec[treat_dc_col]))
TRMs.append(float(rec[magn_moment_col]))
# calculate best fit parameters through TRM acquisition data, and get new banc
NLpars = nlt.NLtrm(Bs, TRMs, best, blab, 0)
#
Mp, Bp = [], []
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
# predicted NRM for this field
npred = nlt.TRM(Bp[-1], NLpars['xopt']
[0], NLpars['xopt'][1])
Mp.append(npred)
pmagplotlib.plot_trm(
PLT['aq'], Bs, TRMs, Bp, Mp, NLpars, rec[experiment_col])
if not save_plots:
pmagplotlib.draw_figs(PLT)
print('Banc= ', float(NLpars['banc'])*1e6)
trmTC = {}
for key in list(TrmRec.keys()):
# copy of info from thellier_specimens record
trmTC[key] = TrmRec[key]
trmTC[int_col] = '%8.3e' % (NLpars['banc'])
trmTC[meth_col] = TrmRec[meth_col]+":DA-NL"
PmagSpecRecs.append(trmTC)
if not save_plots:
ans = input("Return for next specimen, s[a]ve plot ")
if ans == 'a':
Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
pmagplotlib.save_plots(PLT, Name)
else:
Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
pmagplotlib.save_plots(PLT, Name)
specimen += 1
pmag.magic_write(output, PmagSpecRecs, outfile_type)
if __name__ == "__main__":
main()
|
from .bed_bathing import BedBathingEnv, BedBathingX
class BedBathingPR2Env(BedBathingEnv):
def __init__(self):
super(BedBathingPR2Env, self).__init__(robot_type='pr2', human_control=False)
class BedBathingPR2X(BedBathingX):
def __init__(self):
super(BedBathingPR2X, self).__init__(robot_type='pr2', human_control=False)
class BedBathingBaxterEnv(BedBathingEnv):
def __init__(self):
super(BedBathingBaxterEnv, self).__init__(robot_type='baxter', human_control=False)
class BedBathingSawyerEnv(BedBathingEnv):
def __init__(self):
super(BedBathingSawyerEnv, self).__init__(robot_type='sawyer', human_control=False)
class BedBathingJacoEnv(BedBathingEnv):
def __init__(self):
super(BedBathingJacoEnv, self).__init__(robot_type='jaco', human_control=False)
class BedBathingPR2HumanEnv(BedBathingEnv):
def __init__(self):
super(BedBathingPR2HumanEnv, self).__init__(robot_type='pr2', human_control=True)
class BedBathingBaxterHumanEnv(BedBathingEnv):
def __init__(self):
super(BedBathingBaxterHumanEnv, self).__init__(robot_type='baxter', human_control=True)
class BedBathingSawyerHumanEnv(BedBathingEnv):
def __init__(self):
super(BedBathingSawyerHumanEnv, self).__init__(robot_type='sawyer', human_control=True)
class BedBathingJacoHumanEnv(BedBathingEnv):
def __init__(self):
super(BedBathingJacoHumanEnv, self).__init__(robot_type='jaco', human_control=True)
|
"""Tries to access all OpenEO processes endpoints - A initial version of 'integration tests' for the processess service.
To run them gateway, RabbitMQ and the processes and data service need to be up and running. Do not
forget to provide required environment variables.
Once the 'backend' is running some environment variable for this script need to be specified. In detail USERNAME,
PASSWORD, BACKEND_URL. To provide them you can copy the `sample_auth` file provided in this directory and add a USERNAME
PASSWORD combination existing on the backend. BACKEND_URL needs points to the public gateway url. Execute the copied
script to export the variables.
Then this script can be directly executed with
>>>python ./rest_calls.py
It will perform calls to all processes service endpoints and print the status code. It does not do any checks
automatically, you rather have to examine the return status and responses yourself.
"""
import json
import os
from typing import Dict
import requests
backend_url = os.environ.get('BACKEND_URL')
if backend_url is None:
raise OSError("Environment variable BACKEND_URL needs to be specified!")
basic_auth_url = backend_url + '/credentials/basic'
process_url = backend_url + '/processes'
process_graph_url = backend_url + '/process_graphs'
process_graph_id_url = process_graph_url + '/user_cos'
validation_url = backend_url + '/validation'
def get_auth() -> Dict[str, str]:
"""Try to authenticate and return auth header for subsequent calls or None.
The USERNAME and PASSWORD need to be set as environment variables.
"""
auth_response = requests.get(basic_auth_url, auth=(os.environ.get('USERNAME'), os.environ.get('PASSWORD')))
return {'Authorization': 'Bearer basic//' + auth_response.json()['access_token']}
def add_processes() -> None:
"""Try to add a list of predefined processes to the backend."""
processes = ['absolute',
'add',
'add_dimension',
'aggregate_spatial',
'aggregate_spatial_binary',
'aggregate_temporal',
'all',
'and',
'any',
'apply',
'apply_dimension',
'apply_kernel',
'arccos',
'arcosh',
'arcsin',
'arctan',
'arctan2',
'array_apply',
'array_contains',
'array_element',
'array_filter',
'array_find',
'array_labels',
'arsinh',
'artanh',
'between',
'ceil',
'clip',
'cos',
'cosh',
'count',
'create_raster_cube',
'cummax',
'cummin',
'cumproduct',
'cumsum',
'debug',
'dimension_labels',
'divide',
'drop_dimension',
'e',
'eq',
'exp',
'extrema',
'filter_bands',
'filter_bbox',
'filter_labels',
'filter_spatial',
'filter_temporal',
'first',
'floor',
'gt',
'gte',
'if',
'int',
'is_nan',
'is_nodata',
'is_valid',
'last',
'linear_scale_range',
'ln',
'load_collection',
'load_result',
'load_uploaded_files',
'log',
'lt',
'lte',
'mask',
'mask_polygon',
'max',
'mean',
'median',
'merge_cubes',
'min',
'mod',
'multiply',
'ndvi',
'neq',
'normalized_difference',
'not',
'or',
'order',
'pi',
'power',
'product',
'quantiles',
'rearrange',
'reduce_dimension',
'reduce_dimension_binary',
'rename_dimension',
'rename_labels',
'resample_cube_spatial',
'resample_cube_temporal',
'resample_spatial',
'round',
'run_udf',
'run_udf_externally',
'save_result',
'sd',
'sgn',
'sin',
'sinh',
'sort',
'sqrt',
'subtract',
'sum',
'tan',
'tanh',
'text_begins',
'text_contains',
'text_ends',
'text_merge',
'trim_cube',
'variance',
'xor',
]
for proc in processes:
response = requests.put(f'{backend_url}/processes/{proc}', headers=get_auth())
print(f"{proc}: {response.status_code}") # noqa T001
response = requests.get(f'{backend_url}/processes')
if response.status_code != 200:
print('could not get predefined processes') # noqa T001
return
all_predefined = json.loads(response.content)
for actual in all_predefined['processes']:
ref = requests.get(f'https://raw.githubusercontent.com/Open-EO/openeo-processes/1.0.0/{actual["id"]}.json')
ref_content: dict = json.loads(ref.content)
        wrong = {key: {
            'actual': actual.get(key),
            'ref': ref_content[key]
        } for key in ref_content.keys()
            if not (key in actual and ref_content[key] == actual[key])}
print(wrong) # noqa T001
def run_process_graphs() -> None:
"""Try to perform simple REST calls to all process service endpoints and print the return status code."""
json_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'process_graph.json')
with open(json_path) as f:
process_graph = json.load(f)
get_all_pre_response = requests.get(process_url)
print(f"Get all predefined request: {get_all_pre_response.status_code}") # noqa T001
get_all_user_response = requests.get(process_graph_url, headers=get_auth())
print(f"Get all request: {get_all_user_response.status_code}") # noqa T001
put_response = requests.put(process_graph_id_url, json=process_graph, headers=get_auth())
print(f"Put request: {put_response.status_code}") # noqa T001
get_all_user_response = requests.get(process_graph_url, headers=get_auth())
print(f"Get all request: {get_all_user_response.status_code}") # noqa T001
get_response = requests.get(process_graph_id_url, headers=get_auth())
print(f"Get request: {get_response.status_code}") # noqa T001
delete_response = requests.delete(process_graph_id_url, headers=get_auth())
print(f"Delete request: {delete_response.status_code}") # noqa T001
validation_response = requests.post(validation_url, json=process_graph, headers=get_auth())
print(f"Validation request: {validation_response.status_code}") # noqa T001
if __name__ == '__main__':
run_process_graphs()
|
"""
# Hello World.
Simplest possible workflow. We define a DAG with a single task: to print "Hello world".
"""
from dagger import DAG, Task
def say_hello_world(): # noqa
print("Hello world")
dag = DAG(
nodes={
"say-hello-world": Task(say_hello_world),
},
)
if __name__ == "__main__":
"""Define a command-line interface for this DAG, using the CLI runtime. Check the documentation to understand why this is relevant or necessary."""
from dagger.runtime.cli import invoke
invoke(dag)
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class TheOnionIE(InfoExtractor):
_VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P<article_id>[0-9]+)/?'
_TEST = {
'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/',
'md5': '19eaa9a39cf9b9804d982e654dc791ee',
'info_dict': {
'id': '2133',
'ext': 'mp4',
'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image',
'description': 'md5:cc12448686b5600baae9261d3e180910',
            'thumbnail': r're:^https?://.*\.jpg\?\d+$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
article_id = mobj.group('article_id')
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'"videoId":\s(\d+),', webpage, 'video ID')
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
sources = re.findall(r'<source src="([^"]+)" type="([^"]+)"', webpage)
if not sources:
raise ExtractorError(
'No sources found for video %s' % video_id, expected=True)
formats = []
for src, type_ in sources:
if type_ == 'video/mp4':
formats.append({
'format_id': 'mp4_sd',
'preference': 1,
'url': src,
})
elif type_ == 'video/webm':
formats.append({
'format_id': 'webm_sd',
'preference': 0,
'url': src,
})
elif type_ == 'application/x-mpegURL':
formats.extend(
self._extract_m3u8_formats(src, video_id, preference=-1))
else:
self.report_warning(
'Encountered unexpected format: %s' % type_)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
}
|
"""Equivalence class based pytests for the pendulum.parse method"""
import pytest
import pendulum
from pendulum.parsing import ParserError
def test_i_16():
text = "5:33:60"
with pytest.raises(ValueError):
pendulum.parse(text)
def test_i_17():
text = "5:33:-01"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_18():
text = "5:33:ab"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_19():
text = "02:34:"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_20():
text = "5:33:001"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_21():
text = "5:60"
with pytest.raises(ValueError):
pendulum.parse(text)
def test_i_22():
text = "5:-01"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_23():
text = "5:aa"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_24():
text = "01:"
with pytest.raises(TypeError): # interestingly this raises a type error
pendulum.parse(text)
def test_i_25():
text = "05:001"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_26():
text = "24:00"
with pytest.raises(ValueError):
pendulum.parse(text)
def test_i_27():
text = "-1:00"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_28():
text = "aa:00"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_29():
text = ":00"
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_30():
text = "001:00"
with pytest.raises(ParserError):
pendulum.parse(text)
# 01 is day
# 02 is month
# 2018 is year
@pytest.mark.parametrize("text",
[
"2018-02-01", # dd-mm-yyyy
]
)
def test_i_31(text):
parsed = pendulum.parse(text)
assert 2018 == parsed.year
assert 1 == parsed.day
assert 2 == parsed.month
@pytest.mark.parametrize("text",
[
"2018-01-02",
]
)
def test_i_31_fail_assert(text):
parsed = pendulum.parse(text)
with pytest.raises(AssertionError):
assert 2018 == parsed.year
        assert 1 == parsed.day
        assert 2 == parsed.month
@pytest.mark.parametrize("text",
[
"02-2018-01",
"01-2018-02",
"02-01-2018",
"01-02-2018"
]
)
def test_i_31_fail_parse(text):
with pytest.raises(ParserError):
pendulum.parse(text)
def test_i_32():
text = ""
with pytest.raises(ValueError):
pendulum.parse(text)
|
#This file is used to test the code.
import subprocess
def test(filename, method, cutoff, seed):
commandline = ['python', 'TSP.py', '--inst', filename, '--alg',
method, '--time', str(cutoff), '--seed', str(seed)]
subprocess.call(commandline)
if __name__ == '__main__':
file_list = ['burma14.tsp', 'ulysses16.tsp', 'berlin52.tsp',
'kroA100.tsp', 'ch150.tsp', 'gr202.tsp']
seed_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
methods = ['LS2', 'Approx']
for file_name in file_list:
for seed in seed_list:
for method in methods:
test(file_name, method, 600, seed)
|
import random
import numpy as np
import torch
from collections import namedtuple
from SumTree import SumTree
"""
Prioritized experience replay buffer
Taken from https://github.com/austinsilveria/Banana-Collection-DQN/blob/master/Banana_DoubleDQN_PER.py which was adapted from the original source: https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class PreReplayBuffer:
"""Fixed-size buffer to store experience objects."""
def __init__(self, action_size, buffer_size, batch_size, seed, alpha):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
alpha (float): reliance of sampling on prioritization
"""
self.action_size = action_size
self.memory = SumTree(buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "action", "reward", "next_state", "done", "priority"])
self.alpha = alpha
self.max_priority = 0
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done, priority=10):
"""Add a new experience to memory."""
        # Assign priority of new experiences to max priority to ensure they are replayed at least once
if len(self.memory) > self.batch_size + 5:
e = self.experience(state, action, reward, next_state, done, self.max_priority)
else:
e = self.experience(state, action, reward, next_state, done, int(priority) ** self.alpha)
self.memory.add(e)
def update_priority(self, new_priorities, indices):
"""Updates priority of experience after learning."""
for new_priority, index in zip(new_priorities, indices):
old_e = self.memory[index]
new_p = new_priority.item() ** self.alpha
new_e = self.experience(old_e.state, old_e.action, old_e.reward, old_e.next_state, old_e.done, new_p)
self.memory.update(index, new_e)
if new_p > self.max_priority:
self.max_priority = new_p
def sample(self):
"""Sample a batch of experiences from memory based on TD Error priority.
Return indices of sampled experiences in order to update their
priorities after learning from them.
"""
experiences = []
indices = []
sub_array_size = self.memory.get_sum() / self.batch_size
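        # Stratified sampling: split the total priority mass into batch_size equal segments and
        # draw one uniform value from each, so higher-priority experiences are sampled more often.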
for i in range(self.batch_size):
choice = np.random.uniform(sub_array_size * i, sub_array_size * (i + 1))
e, index = self.memory.retrieve(1, choice)
experiences.append(e)
indices.append(index)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
probabilities = torch.from_numpy(
np.vstack([e.priority / self.memory.get_sum() for e in experiences])).float().to(device)
indices = torch.from_numpy(np.vstack([i for i in indices])).int().to(device)
return states, actions, rewards, next_states, dones, probabilities, indices
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
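# A minimal usage sketch (hypothetical sizes and hyperparameters; state/action variables assumed):
#   buffer = PreReplayBuffer(action_size=4, buffer_size=100000, batch_size=64, seed=0, alpha=0.6)
#   buffer.add(state, action, reward, next_state, done)
#   if len(buffer) > 64:
#       states, actions, rewards, next_states, dones, probs, indices = buffer.sample()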
|
import stribor as st
import torch
def test_ordered_right_half_mask():
gen = st.util.get_mask('ordered_right_half')
assert torch.eq(gen(1), torch.Tensor([1])).all()
assert torch.eq(gen(2), torch.Tensor([0, 1])).all()
assert torch.eq(gen(5), torch.Tensor([0, 0, 1, 1, 1])).all()
def test_ordered_left_half_mask():
gen = st.util.get_mask('ordered_left_half')
assert torch.eq(gen(1), torch.Tensor([1])).all()
assert torch.eq(gen(2), torch.Tensor([1, 0])).all()
assert torch.eq(gen(5), torch.Tensor([1, 1, 0, 0, 0])).all()
def test_parity_even_mask():
gen = st.util.get_mask('parity_even')
assert torch.eq(gen(1), torch.Tensor([1])).all()
assert torch.eq(gen(2), torch.Tensor([0, 1])).all()
assert torch.eq(gen(5), torch.Tensor([0, 1, 0, 1, 0])).all()
def test_parity_odd_mask():
gen = st.util.get_mask('parity_odd')
assert torch.eq(gen(1), torch.Tensor([1])).all()
assert torch.eq(gen(2), torch.Tensor([1, 0])).all()
assert torch.eq(gen(5), torch.Tensor([1, 0, 1, 0, 1])).all()
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, os
from webnotes.modules import scrub, get_module_path, scrub_dt_dn
import webnotes.plugins
def import_files(module, dt=None, dn=None, plugin=None, force=False):
if type(module) is list:
out = []
for m in module:
out.append(import_file(m[0], m[1], m[2], plugin=plugin, force=force))
return out
else:
return import_file(module, dt, dn, plugin=plugin, force=force)
def import_file(module, dt, dn, plugin=None, force=False):
"""Sync a file from txt if modifed, return false if not updated"""
webnotes.flags.in_import = True
dt, dn = scrub_dt_dn(dt, dn)
if plugin:
path = webnotes.plugins.get_path(module, dt, dn, plugin, extn="txt")
else:
path = os.path.join(get_module_path(module),
os.path.join(dt, dn, dn + '.txt'))
ret = import_file_by_path(path, force)
webnotes.flags.in_import = False
return ret
def import_file_by_path(path, force=False):
if os.path.exists(path):
from webnotes.modules.utils import peval_doclist
with open(path, 'r') as f:
doclist = peval_doclist(f.read())
if doclist:
doc = doclist[0]
if not force:
# check if timestamps match
if doc['modified']==str(webnotes.conn.get_value(doc['doctype'], doc['name'], 'modified')):
return False
original_modified = doc["modified"]
import_doclist(doclist)
			# since there is a new timestamp on the file, update the timestamp in the database
webnotes.conn.sql("update `tab%s` set modified=%s where name=%s" % \
(doc['doctype'], '%s', '%s'),
(original_modified, doc['name']))
return True
else:
		raise Exception('%s missing' % path)
ignore_values = {
"Report": ["disabled"],
}
ignore_doctypes = ["Page Role", "DocPerm"]
def import_doclist(doclist):
doctype = doclist[0]["doctype"]
name = doclist[0]["name"]
old_doc = None
doctypes = set([d["doctype"] for d in doclist])
ignore = list(doctypes.intersection(set(ignore_doctypes)))
if doctype in ignore_values:
if webnotes.conn.exists(doctype, name):
old_doc = webnotes.doc(doctype, name)
# delete old
webnotes.delete_doc(doctype, name, force=1, ignore_doctypes=ignore, for_reload=True)
# don't overwrite ignored docs
doclist1 = remove_ignored_docs_if_they_already_exist(doclist, ignore, name)
# update old values (if not to be overwritten)
if doctype in ignore_values and old_doc:
update_original_values(doclist1, doctype, old_doc)
# reload_new
new_bean = webnotes.bean(doclist1)
new_bean.ignore_children_type = ignore
new_bean.ignore_links = True
new_bean.ignore_validate = True
new_bean.ignore_permissions = True
new_bean.ignore_mandatory = True
if doctype=="DocType" and name in ["DocField", "DocType"]:
new_bean.ignore_fields = True
new_bean.insert()
def remove_ignored_docs_if_they_already_exist(doclist, ignore, name):
doclist1 = doclist
if ignore:
has_records = []
for d in ignore:
if webnotes.conn.get_value(d, {"parent":name}):
has_records.append(d)
if has_records:
doclist1 = filter(lambda d: d["doctype"] not in has_records, doclist)
return doclist1
def update_original_values(doclist, doctype, old_doc):
for key in ignore_values[doctype]:
doclist[0][key] = old_doc.fields[key]
|
#!/usr/bin/env python
# encoding: utf-8
from unittest import TestCase
from ycyc.ycollections import tagmaps
class TestTagMaps(TestCase):
    def test_usage(self):
maps = tagmaps.TagMaps()
@maps.register("add")
def add(x, y):
return x + y
@maps.register("sub")
def sub(x, y):
return x - y
self.assertEqual(maps["add"](1, 2), add(1, 2))
self.assertEqual(maps["sub"](4, 5), sub(4, 5))
with self.assertRaises(KeyError):
maps["noexist"](6, 7)
@maps.register(maps.DefaultKey)
def default(x, y):
return None
self.assertEqual(maps["noexist"](8, 9), default(8, 9))
self.assertListEqual(list(maps), ["add", "sub", ""])
self.assertEqual(list(maps)[0], "add")
self.assertEqual(list(maps)[1], "sub")
self.assertEqual(list(maps)[2], "")
|
import numpy as np
from predict_c3d_ucf101 import run_test
from parse_class_index import parse_cls_indx
from evaluate_predictions import evaluate_prediction
ds_dir = "/home/bassel/data/office-actions/office_actions_19/short_clips/unstabilized_resized_frms_112"
testing_files_dic = {
"all_views":
["/home/bassel/data/office-actions/office_actions_19/short_clips/labels/test_stack_list.txt",
# "conv3d_deepnetA_sport1m_iter_1900000_TF.model",
"c3d_ucf_model-1842",
"../c3d_data_preprocessing/oa_kinetics_calculated_mean.npy"],
# "all_views_oa_kinetics":
# ["/home/bassel/data/oa_kinetics/lbls/oa18_test_stack_mapped_oa11_kinetics.txt",
# "c3d_ucf_model-14698",
# "../c3d_data_preprocessing/oa_kinetics_calculated_mean.npy"]
}#,
# "side_view":
# ["/home/bassel/data/office-actions/office_actions_19/short_clips/labels/side_only_test_stack_list.txt",
# "conv3d_deepnetA_sport1m_iter_1900000_TF.model",
# "../c3d_data_preprocessing/side_action_dataset_calculated_mean.npy"],
# "front_view":
# ["/home/bassel/data/office-actions/office_actions_19/short_clips/labels/front_only_test_stack_list.txt",
# "conv3d_deepnetA_sport1m_iter_1900000_TF.model",
# "../c3d_data_preprocessing/front_action_dataset_calculated_mean.npy"]}
# testing_files_dic = {"stabilizied side view":
# ["/home/bassel/data/office-actions/office_actions_19/short_clips/labels/side_only_test_stack_list.txt",
# "stabilized_side_view_c3d_ucf_model-996",
# "../c3d_data_preprocessing/stabilized_side_action_dataset_calculated_mean.npy"],
# }
TESTING_BATCH_SIZE=64
cls_indx_path = "/home/bassel/data/office-actions/office_actions_19/short_clips/labels/class_index.txt"
cls_indx = parse_cls_indx(cls_indx_path)
def calculate_cooccurence_matrix(file_path = 'predict_ret.txt'):
coocurrence_matrix = np.zeros((18,18)).tolist()
with open(file_path) as predictions:
for i, l in enumerate(predictions):
rec = [float(val) for val in l.split(',')]
coocurrence_matrix[int(rec[0])-1][int(rec[2])-1] += 1
return coocurrence_matrix
def file_save_coocurrence_matrix(file_path, coocurrence_matrix, cls_indx):
writer = open(file_path, 'a')
for action_cls, action_row in enumerate(coocurrence_matrix):
action_row = [str(x) for x in action_row]
writer.write("{:20}".format(str(cls_indx[action_cls+1])) + "," + ",".join(action_row))
writer.write("\n")
for action_cls, action_row in enumerate(coocurrence_matrix):
sum = np.array(action_row).sum()
writer.write(str(cls_indx[action_cls + 1]) + "," +
str(100 * action_row[action_cls] / float(sum)) + "%\n")
writer.close()
if __name__ == '__main__':
for perspective, (testing_file, model_name, mean_file) in testing_files_dic.items():
run_test(ds_dir, mean_file, "model/"+model_name, testing_file, TESTING_BATCH_SIZE)
testing_accuracy, _, _ = evaluate_prediction()
file_path = "oa18_on_kinectics_cooccurence_matrix_{}.csv".format(perspective)
with open(file_path, 'w') as fw:
fw.write(str(testing_accuracy)+"\n")
coocurrence_matrix = calculate_cooccurence_matrix()
file_save_coocurrence_matrix(file_path, coocurrence_matrix, cls_indx)
|
from __future__ import division
import numpy as np
import neuro #This is MUST be included
import random
random.seed(0)
def neuro_start(inputs, targets, training):
#initializes the neural network
network=neuro.setup_network(inputs)
#The number of repetitions that you will
#be training your network with
training_reps= training
#trains your neural network
neuro.train(network, inputs, targets, training_reps)
total = 0
correct = 0
for i in range(1,10):
for x in range(1,10):
pred = neuro.predict(network, [i+x])
#rounds the predicted value to either 0 or 1
#print '{} == pred {}'.format(i+x,pred)
pred = np.round(pred)
if (i+x)%2 == pred:
correct+= 1
total += 1
percent = (correct/total) * 100
#print 'with training: {}'.format(training_reps)
#print 'correct: {}\n total: {} \n {:.2f}%'.format(correct, total, (correct/total) * 100)
return training_reps, percent, correct, total
def main():
# Get inputs from file
inputs = []
with open('dataset.csv', 'r') as file:
for line in file:
inputs.append([sum(list(map(int, line.strip().split(','))))])
# Generate target points
#print inputs
targets = []
for a in inputs:
targets.append([(a[0]%2)])
#print targets
# Perform cross validation on training point and return best result.
maxi = [1, 0]
for t in [50 * i for i in range(1,10)]:
temp = neuro_start(inputs, targets, t)
if temp[1] > maxi[1]:
maxi = temp
print 'Training_points: {}\n Correct: {:.2f}% ({} out of {})'.format(*maxi)
if __name__ == '__main__':
main()
|
from tqdm import tqdm
import requests
chunk_size = 1024
def download_file(url, filename):
r = requests.get(url, stream=True)
total_size = int(r.headers['content-length'])
with open(filename, 'wb') as f:
for data in tqdm(iterable=r.iter_content(
chunk_size=chunk_size), total=total_size / chunk_size, unit='KB'):
f.write(data)
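# Example usage (hypothetical URL and file name):
#   download_file('https://example.com/data/archive.zip', 'archive.zip')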
|
from .ast import *
from .ast import __all__ as __ast_all__
from .astprint import *
from .astprint import __all__ as __astprint_all__
from .moduleinfo import *
from .moduleinfo import __all__ as __moduleinfo_all__
from .obj import *
from .obj import __all__ as __obj_all__
from .lexer import *
from .lexer import __all__ as __lexer_all__
from .parser import *
from .parser import __all__ as __parser_all__
from .run import *
from .run import __all__ as __run_all__
__version__ = '0.1.0'
__version_info__ = tuple(int(segment) for segment in __version__.split('.'))
__all__ = (
__ast_all__ + __astprint_all__ + __moduleinfo_all__ + __obj_all__ +
__lexer_all__ + __parser_all__ + __run_all__
)
|
import os
c.NotebookApp.ip = '*'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.NotebookApp.notebook_dir = u'/root/'
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
from trezorlib import btc, messages as proto
from ..support.tx_cache import tx_cache
from .common import TrezorTest
TXHASH_d5f65e = bytes.fromhex(
"d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"
)
# address_n = [177] < 68
# address_n = [16518] < 66
class TestZerosig(TrezorTest):
"""
def test_mine_zero_signature(self):
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
msg = self.client._prepare_sign_tx('Bitcoin', [inp1, ], [])
for n in range(3500, 200000):
out1 = proto.TxOutputType(address_n=[n],
amount=390000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
msg.ClearField('outputs')
msg.outputs.extend([out1, ])
tx = self.client.call(msg)
siglen = tx.serialized_tx[44]
print(siglen)
if siglen < 67:
print("!!!!", n)
print(tx.serialized_tx.hex())
return
"""
def test_one_zero_signature(self):
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
# Following address_n has been mined by 'test_mine_zero_signature'
out1 = proto.TxOutputType(
address_n=[177],
amount=390000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = btc.sign_tx(
self.client, "Bitcoin", [inp1], [out1], prev_txes=tx_cache("Bitcoin")
)
siglen = serialized_tx[44]
# TREZOR must strip leading zero from signature
assert siglen == 67
def test_two_zero_signature(self):
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
# Following address_n has been mined by 'test_mine_zero_signature'
out1 = proto.TxOutputType(
address_n=[16518],
amount=390000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = btc.sign_tx(
self.client, "Bitcoin", [inp1], [out1], prev_txes=tx_cache("Bitcoin")
)
siglen = serialized_tx[44]
# TREZOR must strip leading zero from signature
assert siglen == 66
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250, unique=True)
description = models.TextField(blank=True)
image = models.ImageField(upload_to='category', blank=True)
class Meta:
ordering = ('name',)
verbose_name = 'category'
verbose_name_plural = 'categories'
def get_url(self):
return reverse('products_by_category', args=[self.slug])
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250, unique=True)
description = models.TextField(blank=True)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
price = models.DecimalField(max_digits=10, decimal_places=2)
image = models.ImageField(upload_to='product', blank=True)
stock = models.IntegerField()
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('name',)
verbose_name = 'product'
verbose_name_plural = 'products'
def get_url(self):
return reverse('product_detail', args=[self.category.slug, self.slug])
def __str__(self):
return self.name
# Model: Cart
class Cart(models.Model):
cart_id = models.CharField(max_length=250, blank=True)
date_added = models.DateField(auto_now_add=True)
class Meta:
db_table = 'Cart'
ordering = ['date_added']
def __str__(self):
return self.cart_id
class CartItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
quantity = models.IntegerField()
active = models.BooleanField(default=True)
class Meta:
db_table = 'CartItem'
def sub_total(self):
return self.product.price * self.quantity
def __str__(self):
        return str(self.product)
class Order(models.Model):
token = models.CharField(max_length=250, blank=True)
total = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='USD Order Total')
emailAddress = models.EmailField(max_length=250, blank=True, verbose_name='Email Address')
created = models.DateTimeField(auto_now_add=True)
billingName = models.CharField(max_length=250, blank=True)
billingAddress1 = models.CharField(max_length=250, blank=True)
billingCity = models.CharField(max_length=250, blank=True)
billingPostcode = models.CharField(max_length=250, blank=True)
billingCountry = models.CharField(max_length=250, blank=True)
shippingName = models.CharField(max_length=250, blank=True)
shippingAddress1 = models.CharField(max_length=250, blank=True)
shippingCity = models.CharField(max_length=250, blank=True)
shippingPostcode = models.CharField(max_length=250, blank=True)
shippingCountry = models.CharField(max_length=250, blank=True)
class Meta:
db_table = 'Order'
ordering = ['-created']
def __str__(self):
return str(self.id)
class OrderItem(models.Model):
product = models.CharField(max_length=250)
quantity = models.IntegerField()
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='USD Price')
order = models.ForeignKey(Order, on_delete=models.CASCADE)
class Meta:
db_table = 'OrderItem'
def sub_total(self):
return self.quantity * self.price
def __str__(self):
return self.product
|
from scoring.models import Judge_Assignment, Project
def cal_average_score():
done_list = []
jas = list(Judge_Assignment.objects.all())
for ja in jas:
if ja.project_id not in done_list:
score = 0
done_list.append(ja.project_id)
project = Judge_Assignment.objects.filter(project_id = ja.project_id.project_id)
for p in project:
score = score + p.raw_score
avg_score = score / len(list(project))
if score == 0:
continue
project = Project.objects.get(project_id = ja.project_id.project_id)
project.avg_score = avg_score
project.save() |
from bnn import BNN
from collections import OrderedDict
import tensorflow as tf
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
class EMAML:
def __init__(self,
dim_input,
dim_output,
dim_hidden=32,
num_layers=4,
num_particles=2,
max_test_step=5):
# model size
self.dim_input = dim_input
self.dim_output = dim_output
self.dim_hidden = dim_hidden
self.num_layers = num_layers
self.num_particles = num_particles
# learning rate
self.in_lr = tf.placeholder_with_default(input=FLAGS.in_lr,
name='in_lr',
shape=[])
self.out_lr = tf.placeholder_with_default(input=FLAGS.out_lr,
name='out_lr',
shape=[])
# for test time
self.max_test_step = max_test_step
# build model
self.bnn = BNN(dim_input=self.dim_input,
dim_output=self.dim_output,
dim_hidden=self.dim_hidden,
num_layers=self.num_layers,
is_bnn=False)
# init model
self.construct_network_weights = self.bnn.construct_network_weights
# forwarding
self.forward_network = self.bnn.forward_network
# init input data
self.train_x = tf.placeholder(dtype=tf.float32, name='train_x')
self.train_y = tf.placeholder(dtype=tf.float32, name='train_y')
self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
# init parameters
self.W_network_particles = None
# build model
def construct_model(self,
is_training=True):
print('start model construction')
# init model
with tf.variable_scope('model', reuse=None) as training_scope:
# init parameters
if is_training or self.W_network_particles is None:
# network parameters
self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx))
for p_idx in range(self.num_particles)]
else:
training_scope.reuse_variables()
# set number of follower steps
if is_training:
max_update_step = FLAGS.in_step
else:
max_update_step = max(FLAGS.in_step, self.max_test_step)
# task-wise inner loop
def fast_learn_one_task(inputs):
# decompose input data
[train_x, valid_x,
train_y, valid_y] = inputs
##########
# update #
##########
# init meta loss
meta_loss = []
# get the follow particles
WW_update = [OrderedDict(zip(W_dic.keys(), W_dic.values()))
for W_dic in self.W_network_particles]
# for each step
step_train_loss = [None] * (max_update_step + 1)
step_valid_loss = [None] * (max_update_step + 1)
step_train_pred = [None] * (max_update_step + 1)
step_valid_pred = [None] * (max_update_step + 1)
for s_idx in range(max_update_step + 1):
# for each particle
train_z_list = []
valid_z_list = []
train_mse_list = []
valid_mse_list = []
for p_idx in range(FLAGS.num_particles):
# compute prediction
train_z_list.append(self.forward_network(x=train_x, W_dict=WW_update[p_idx]))
valid_z_list.append(self.forward_network(x=valid_x, W_dict=WW_update[p_idx]))
# compute mse data
train_mse_list.append(self.bnn.mse_data(predict_y=train_z_list[-1], target_y=train_y))
valid_mse_list.append(self.bnn.mse_data(predict_y=valid_z_list[-1], target_y=valid_y))
# update
if s_idx < max_update_step:
# compute loss and gradient
particle_loss = tf.reduce_mean(train_mse_list[-1])
dWp = tf.gradients(ys=particle_loss,
xs=list(WW_update[p_idx].values()))
# stop gradient to avoid second order
if FLAGS.stop_grad:
dWp = [tf.stop_gradient(grad) for grad in dWp]
# re-order
dWp = OrderedDict(zip(WW_update[p_idx].keys(), dWp))
# for each param
param_names = []
param_vals = []
for key in list(WW_update[p_idx].keys()):
if FLAGS.in_grad_clip > 0:
grad = tf.clip_by_value(dWp[key], -FLAGS.in_grad_clip, FLAGS.in_grad_clip)
else:
grad = dWp[key]
param_names.append(key)
param_vals.append(WW_update[p_idx][key] - self.in_lr * grad)
WW_update[p_idx] = OrderedDict(zip(param_names, param_vals))
else:
# meta-loss
meta_loss.append(tf.reduce_mean(valid_mse_list[-1]))
# aggregate particle results
step_train_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(train_mse) for train_mse in train_mse_list])
step_valid_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_mse) for valid_mse in valid_mse_list])
step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
# sum meta-loss over particles
meta_loss = tf.reduce_sum(meta_loss)
return [step_train_loss,
step_valid_loss,
step_train_pred,
step_valid_pred,
meta_loss]
# set output type
out_dtype = [[tf.float32] * (max_update_step + 1),
[tf.float32] * (max_update_step + 1),
[tf.float32] * (max_update_step + 1),
[tf.float32] * (max_update_step + 1),
tf.float32]
# compute over tasks
result = tf.map_fn(fast_learn_one_task,
elems=[self.train_x, self.valid_x,
self.train_y, self.valid_y],
dtype=out_dtype,
parallel_iterations=FLAGS.num_tasks)
# unroll result
full_step_train_loss = result[0]
full_step_valid_loss = result[1]
full_step_train_pred = result[2]
full_step_valid_pred = result[3]
full_meta_loss = result[4]
# for training
if is_training:
# summarize results
self.total_train_loss = [tf.reduce_mean(full_step_train_loss[j])
for j in range(FLAGS.in_step + 1)]
self.total_valid_loss = [tf.reduce_mean(full_step_valid_loss[j])
for j in range(FLAGS.in_step + 1)]
self.total_meta_loss = tf.reduce_mean(full_meta_loss)
# prediction
self.total_train_z_list = full_step_train_pred
self.total_valid_z_list = full_step_valid_pred
###############
# meta update #
###############
update_params_list = []
update_params_name = []
# get params
for p in range(FLAGS.num_particles):
for name in self.W_network_particles[0].keys():
update_params_name.append([p, name])
update_params_list.append(self.W_network_particles[p][name])
# set optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=self.out_lr)
# compute gradient
gv_list = optimizer.compute_gradients(loss=self.total_meta_loss,
var_list=update_params_list)
# gradient clipping
if FLAGS.out_grad_clip > 0:
gv_list = [(tf.clip_by_value(grad, -FLAGS.out_grad_clip, FLAGS.out_grad_clip), var)
for grad, var in gv_list]
# optimizer
self.metatrain_op = optimizer.apply_gradients(gv_list)
else:
# summarize results
self.eval_train_loss = [tf.reduce_mean(full_step_train_loss[j])
for j in range(max_update_step + 1)]
self.eval_valid_loss = [tf.reduce_mean(full_step_valid_loss[j])
for j in range(max_update_step + 1)]
# prediction
self.eval_train_z_list = full_step_train_pred
self.eval_valid_z_list = full_step_valid_pred
print('end of model construction')
|
import torch
import utils
import dataset
import pandas as pd
from model import BertBaseUncased
import CONFIG as config
from tqdm import tqdm
def test_fn(dataloader,model,device):
model.eval()
accuracy = utils.AverageMeter()
fin_outputs = []
tk0 = tqdm(dataloader,total = len(dataloader))
with torch.no_grad():
for bi,d in enumerate(tk0):
ids = d['ids']
token_type_ids = d['token_type_ids']
mask = d['mask']
targets = d['targets']
ids = ids.to(device,dtype = torch.long)
token_type_ids = token_type_ids.to(device,dtype = torch.long)
mask = mask.to(device,dtype = torch.long)
targets = targets.to(device,dtype = torch.long)
outputs = model(
ids,
mask,
token_type_ids
)
outputs = outputs.float()
softmax = torch.log_softmax(outputs,dim = 1)
_,preds = torch.max(softmax,dim = 1)
fin_outputs.extend(preds)
acc = (targets == preds).float().mean()
accuracy.update(acc.item(),ids.size(0))
tk0.set_postfix(test_acc = accuracy.avg)
return fin_outputs
def run_test():
df = pd.read_csv(config.TESTING_FILE)
df = df[df.sentiment!='neutral']
df.sentiment = df.sentiment.apply(lambda x:utils.sent2num(x))
test_dataset = dataset.BERTDataset(
tweet=df.text.values,
sentiment = df.sentiment.values
)
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=config.VALID_BATCH_SIZE,
)
device = 'cpu'
model = BertBaseUncased().to(device)
model.load_state_dict(torch.load(config.MODEL_PATH))
outputs = test_fn(test_dataloader,model,device)
print('Test Accuracy: ',(outputs == df.sentiment.values).mean())
run_test()
|
# 10_kapitel_13_repetitionsfragen.py
import re
max_text_length=70
max_text_delta=20
def output(title, string):
print('╔'+''.center(max_text_length+8, '═')+'╗')
print('║ '+title.center(max_text_length+7).upper()+'║')
print('╠'+''.center(max_text_length+8, '═')+'╣')
string=string+' '*max_text_length
search_pattern=re.compile(r'\w+.{'+str(max_text_length-max_text_delta-7)+r','+str(max_text_length-7)+r'}[ |.|,|\n|>|\W]', re.DOTALL)
results=search_pattern.findall(string)
for line in results:
print('║ '+line.strip()+'║'.rjust(max_text_length+8-len(line.strip())))
print('╚'+''.center(max_text_length+8, '═')+'╝')
input()
output('Question 01', 'You do not pass the string value with the name of the PDF file to the function PyPDF2.PdfFileReader(). What do you pass instead?')
output('Answer', 'You pass the variable holding the file opened in binary mode. Example: pdf_file_content = PyPDF2.PdfFileReader(open("pdf_file.pdf", "rb"))')
output('Question 02', 'In which modes must file objects be opened for PdfFileReader() and PdfFileWriter()?')
output('Answer', 'Read-binary, i.e. "rb", for example open("pdf_file.pdf", "rb"), and write-binary ("wb") for .PdfFileWriter()')
output('Question 03', 'How do you retrieve the Page object for page 5 from a PdfFileReader object?')
output('Answer', 'You use page_5=pdf_file_content.getPage(4); a PDF document always starts at page number 0, so getPage(4) must be used for the 5th page.')
output('Question 04', 'Which PdfFileReader variable stores the number of pages of a PDF document?')
output('Answer', 'pdf_file_content.numPages returns the number of pages of the PDF')
output('Question 05', 'What must you do before you can retrieve the Page objects from a PdfFileReader object whose PDF is protected with the password "swordfish"?')
output('Answer', 'The file can be decrypted with pdf_file_content.decrypt("swordfish") and encrypted again with .encrypt(password)')
output('Question 06', 'Which methods do you use to rotate a page?')
output('Answer', 'The page can be rotated with page_5.rotateClockwise(90).')
output('Question 07', 'Which method returns a Document object for the file demo.docx?')
output('Answer', 'With doc_file=docx.Document("demo.docx") demo.docx can be read and stored in a variable.')
output('Question 08', 'What is the difference between a Paragraph and a Run object?')
output('Answer', 'A paragraph contains the complete text up to the next line break and is itself divided into runs, which determine the appearance of the text segments. Every time the formatting changes, a new run begins.')
output('Question 09', 'How do you retrieve the list of Paragraph objects for a document stored in the variable "doc"?')
output('Answer', 'doc.paragraphs returns all Paragraph objects as a list')
output('Question 10', 'Which objects have the attributes bold, underline, italic, strike and outline?')
output('Answer', 'These attributes belong to the Run object and define whether the text is bold, underlined, italic, struck through or outlined. Example: run_objekt.italic=True')
output('Question 11', 'What is the difference between the values True, False and None for the bold attribute?')
output('Answer', 'bold=True means the text is rendered bold, bold=False means it is not rendered bold, and bold=None uses the default values of the Run object')
output('Question 12', 'How do you create a Document object for a new Word document?')
output('Answer', 'A new document can be created with docx.Document()')
output('Question 13', 'How do you add a paragraph with the text "Hello there" to a Document object stored in the variable doc?')
output('Answer', 'A new paragraph with the given text content can be added with doc.add_paragraph("Hello there").')
output('Question 14', 'Which integer values can you use to specify heading levels in a Word document?')
output('Answer', 'Headings can be inserted with doc_file.add_heading("Header Text", 0-4), where the level is an integer from 0 to 4.')
|
"""Code for handling downloading of HPO files used by scout from CLI"""
import logging
import pathlib
import click
from scout.utils.scout_requests import fetch_mim_files
LOG = logging.getLogger(__name__)
def print_omim(out_dir, api_key):
"""Print HPO files to a directory
Args:
out_dir(Path)
"""
mim_files = fetch_mim_files(api_key, mim2genes=True, genemap2=True)
file_name = "genemap2.txt"
file_path = out_dir / file_name
LOG.info("Print genemap genes to %s", file_path)
with file_path.open("w", encoding="utf-8") as outfile:
for line in mim_files["genemap2"]:
outfile.write(line + "\n")
file_name = "mim2genes.txt"
file_path = out_dir / file_name
LOG.info("Print mim2gene info to %s", file_path)
with file_path.open("w", encoding="utf-8") as outfile:
for line in mim_files["mim2genes"]:
outfile.write(line + "\n")
@click.command("omim", help="Download a files with OMIM info")
@click.option("--api-key", help="Specify the api key", required=True)
@click.option("-o", "--out-dir", default="./", show_default=True)
def omim(out_dir, api_key):
"""Download the OMIM genes"""
out_dir = pathlib.Path(out_dir)
out_dir.mkdir(parents=True, exist_ok=True)
LOG.info("Download OMIM resources to %s", out_dir)
print_omim(out_dir, api_key)
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .dhcp_option import DhcpOption
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DhcpSearchDomainOption(DhcpOption):
"""
DHCP option for specifying a search domain name for DNS queries. For more information, see
`DNS in Your Virtual Cloud Network`__.
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
"""
def __init__(self, **kwargs):
"""
Initializes a new DhcpSearchDomainOption object with values from keyword arguments. The default value of the :py:attr:`~oci.core.models.DhcpSearchDomainOption.type` attribute
of this class is ``SearchDomain`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this DhcpSearchDomainOption.
:type type: str
:param search_domain_names:
The value to assign to the search_domain_names property of this DhcpSearchDomainOption.
:type search_domain_names: list[str]
"""
self.swagger_types = {
'type': 'str',
'search_domain_names': 'list[str]'
}
self.attribute_map = {
'type': 'type',
'search_domain_names': 'searchDomainNames'
}
self._type = None
self._search_domain_names = None
self._type = 'SearchDomain'
@property
def search_domain_names(self):
"""
**[Required]** Gets the search_domain_names of this DhcpSearchDomainOption.
A single search domain name according to `RFC 952`__
and `RFC 1123`__. During a DNS query,
the OS will append this search domain name to the value being queried.
If you set :class:`DhcpDnsOption` to `VcnLocalPlusInternet`,
and you assign a DNS label to the VCN during creation, the search domain name in the
VCN's default set of DHCP options is automatically set to the VCN domain
(for example, `vcn1.oraclevcn.com`).
If you don't want to use a search domain name, omit this option from the
set of DHCP options. Do not include this option with an empty list
of search domain names, or with an empty string as the value for any search
domain name.
__ https://tools.ietf.org/html/rfc952
__ https://tools.ietf.org/html/rfc1123
:return: The search_domain_names of this DhcpSearchDomainOption.
:rtype: list[str]
"""
return self._search_domain_names
@search_domain_names.setter
def search_domain_names(self, search_domain_names):
"""
Sets the search_domain_names of this DhcpSearchDomainOption.
A single search domain name according to `RFC 952`__
and `RFC 1123`__. During a DNS query,
the OS will append this search domain name to the value being queried.
If you set :class:`DhcpDnsOption` to `VcnLocalPlusInternet`,
and you assign a DNS label to the VCN during creation, the search domain name in the
VCN's default set of DHCP options is automatically set to the VCN domain
(for example, `vcn1.oraclevcn.com`).
If you don't want to use a search domain name, omit this option from the
set of DHCP options. Do not include this option with an empty list
of search domain names, or with an empty string as the value for any search
domain name.
__ https://tools.ietf.org/html/rfc952
__ https://tools.ietf.org/html/rfc1123
:param search_domain_names: The search_domain_names of this DhcpSearchDomainOption.
:type: list[str]
"""
self._search_domain_names = search_domain_names
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the extraction tool object."""
from __future__ import unicode_literals
import argparse
import unittest
from plaso.cli import extraction_tool
from tests.cli import test_lib
class ExtractionToolTest(test_lib.CLIToolTestCase):
"""Tests for the extraction tool object."""
# pylint: disable=protected-access
_EXPECTED_PERFORMANCE_OPTIONS = '\n'.join([
'usage: extraction_tool_test.py [--buffer_size BUFFER_SIZE]',
' [--queue_size QUEUE_SIZE]',
'',
'Test argument parser.',
'',
'optional arguments:',
(' --buffer_size BUFFER_SIZE, --buffer-size BUFFER_SIZE, '
'--bs BUFFER_SIZE'),
(' The buffer size for the output (defaults to '
'196MiB).'),
' --queue_size QUEUE_SIZE, --queue-size QUEUE_SIZE',
' The maximum number of queued items per worker',
' (defaults to 125000)',
''])
# TODO: add test for _CreateProcessingConfiguration
def testParsePerformanceOptions(self):
"""Tests the _ParsePerformanceOptions function."""
test_tool = extraction_tool.ExtractionTool()
options = test_lib.TestOptions()
test_tool._ParsePerformanceOptions(options)
# TODO: add test for _ParseProcessingOptions
# TODO: add test for _PreprocessSources
# TODO: add test for _ReadParserPresetsFromFile
# TODO: add test for _SetExtractionParsersAndPlugins
# TODO: add test for _SetExtractionPreferredTimeZone
def testAddPerformanceOptions(self):
"""Tests the AddPerformanceOptions function."""
argument_parser = argparse.ArgumentParser(
prog='extraction_tool_test.py', description='Test argument parser.',
add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter)
test_tool = extraction_tool.ExtractionTool()
test_tool.AddPerformanceOptions(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_PERFORMANCE_OPTIONS)
# TODO: add test for AddProcessingOptions
def testListParsersAndPlugins(self):
"""Tests the ListParsersAndPlugins function."""
presets_file = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(presets_file)
output_writer = test_lib.TestOutputWriter(encoding='utf-8')
test_tool = extraction_tool.ExtractionTool(output_writer=output_writer)
test_tool._presets_manager.ReadFromFile(presets_file)
test_tool.ListParsersAndPlugins()
output = output_writer.ReadOutput()
number_of_tables = 0
lines = []
for line in output.split('\n'):
line = line.strip()
lines.append(line)
if line.startswith('*****') and line.endswith('*****'):
number_of_tables += 1
self.assertIn('Parsers', lines[1])
lines = frozenset(lines)
self.assertEqual(number_of_tables, 10)
expected_line = 'filestat : Parser for file system stat information.'
self.assertIn(expected_line, lines)
expected_line = 'bencode_utorrent : Parser for uTorrent bencoded files.'
self.assertIn(expected_line, lines)
expected_line = (
'msie_webcache : Parser for MSIE WebCache ESE database files.')
self.assertIn(expected_line, lines)
expected_line = 'olecf_default : Parser for a generic OLECF item.'
self.assertIn(expected_line, lines)
expected_line = 'plist_default : Parser for plist files.'
self.assertIn(expected_line, lines)
# Note that the expected line is truncated by the cell wrapping in
# the table.
expected_line = (
'chrome_27_history : Parser for Google Chrome 27 and up history SQLite')
self.assertIn(expected_line, lines)
expected_line = 'ssh : Parser for SSH syslog entries.'
self.assertIn(expected_line, lines)
expected_line = 'winreg_default : Parser for Registry data.'
self.assertIn(expected_line, lines)
if __name__ == '__main__':
unittest.main()
|
# Demonstrates the Personally Identifiable Information (PII) detection capability of the expert.ai (Cloud based) Natural Language API
import json
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()
text = "CREDIT CARD CANCELLATION REQUEST\nPERSONAL INFORMATION:\nNAME: Anne Cuthbert\nADDRESS: 3239 Rardin Drive, Broad Run, VA, 20137\nBORN: 02/06/1992, Charlottetown, Canada\nPHONE NUMBER: 985-281-4501\nEMAIL: acut@mails.com\nDear Sir or madam,\nOn April 8, 2017, I lost my credit card while I was abroad. Please find below the credit card's details:\nCard Type: American Express 4242 8978 2056 4987, Expiration date 09/2019, CVV 987.\nAs soon as I realized the card was lost, I phoned the company and asked that it be cancelled.\nThis letter is to request that you issue me a replacement card as soon as possible. The cancelled card should be not authorized under any circumstances.\nThank you for your attention.\nIf you have any questions, I can be reached at the phone number or email above mentioned\nSincerely,\nAnne Cuthbert"
detector = 'pii'
language= 'en'
output = client.detection(body={"document": {"text": text}}, params={'detector': detector, 'language': language})
# Output extra data containing the JSON-LD object
print("extra_data: ",json.dumps(output.extra_data, indent=4, sort_keys=True))
|
__version__ = '4.6.7'
|
#!/usr/bin/env python3
# Knockout-tournament helper: the input is n followed by 2**n ratings, the
# larger rating always wins, and we print the 1-indexed original position of
# the weaker of the two finalists (i.e. the runner-up).
def separate(a: list, length) -> zip:
    # Group consecutive elements of a into tuples of the given length.
    return zip(*[iter(a)] * length)
n, *a = map(int, open(0).read().split())
b = a[:]
# Play n - 1 rounds, keeping the winner (max) of each adjacent pair.
for _ in [0] * (n - 1):
    a = [*map(max, separate(a, 2))]
# Two finalists remain; the smaller rating loses the final.
print(b.index(min(a)) + 1)
#-*- coding: utf-8 -*-
"""
@author:Bengali.AI
"""
from __future__ import print_function
#-------------------------------------------
# globals
#-------------------------------------------
from .base import BaseNormalizer,languages
#-------------------------------------------
# cleaner class
#-------------------------------------------
class Normalizer(BaseNormalizer):
def __init__(self,
allow_english=False,
keep_legacy_symbols=False,
legacy_maps=None):
'''
initialize a normalizer
args:
            allow_english          : allow english letters, numbers and punctuation [default:False]
keep_legacy_symbols : legacy symbols will be considered as valid unicodes[default:False]
'৺':Isshar
'৻':Ganda
'ঀ':Anji (not '৭')
'ঌ':li
'ৡ':dirgho li
'ঽ':Avagraha
'ৠ':Vocalic Rr (not 'ঋ')
'৲':rupi
'৴':currency numerator 1
'৵':currency numerator 2
'৶':currency numerator 3
'৷':currency numerator 4
'৸':currency numerator one less than the denominator
'৹':Currency Denominator Sixteen
            legacy_maps            : a dictionary for changing legacy symbols into a more commonly used unicode
a default legacy map is included in the language class as well,
legacy_maps={'ঀ':'৭',
'ঌ':'৯',
'ৡ':'৯',
'৵':'৯',
'৻':'ৎ',
'ৠ':'ঋ',
'ঽ':'ই'}
                pass:
                    * legacy_maps=None      : keep the legacy symbols as they are
                    * legacy_maps="default" : use the default legacy map
                    * legacy_maps=custom dictionary (type: dict) : map each desired legacy symbol to any symbol you want
                        * the keys in the custom dict must belong to the legacy symbols
                        * the values in the custom dict must belong to either vowels, consonants, numbers or diacritics
vowels = ['অ', 'আ', 'ই', 'ঈ', 'উ', 'ঊ', 'ঋ', 'এ', 'ঐ', 'ও', 'ঔ']
consonants = ['ক', 'খ', 'গ', 'ঘ', 'ঙ', 'চ', 'ছ','জ', 'ঝ', 'ঞ',
'ট', 'ঠ', 'ড', 'ঢ', 'ণ', 'ত', 'থ', 'দ', 'ধ', 'ন',
'প', 'ফ', 'ব', 'ভ', 'ম', 'য', 'র', 'ল', 'শ', 'ষ',
'স', 'হ','ড়', 'ঢ়', 'য়','ৎ']
numbers = ['০', '১', '২', '৩', '৪', '৫', '৬', '৭', '৮', '৯']
vowel_diacritics = ['া', 'ি', 'ী', 'ু', 'ূ', 'ৃ', 'ে', 'ৈ', 'ো', 'ৌ']
consonant_diacritics = ['ঁ', 'ং', 'ঃ']
                > for example you may want to map 'ঽ':Avagraha to 'হ' based on visual similarity
                    (default:'ই')
            ** legacy conditions: keep_legacy_symbols and legacy_maps operate as follows
case-1) keep_legacy_symbols=True and legacy_maps=None
: all legacy symbols will be considered valid unicodes. None of them will be changed
case-2) keep_legacy_symbols=True and legacy_maps=valid dictionary example:{'ঀ':'ক'}
: all legacy symbols will be considered valid unicodes. Only 'ঀ' will be changed to 'ক' , others will be untouched
case-3) keep_legacy_symbols=False and legacy_maps=None
: all legacy symbols will be removed
case-4) keep_legacy_symbols=False and legacy_maps=valid dictionary example:{'ঽ':'ই','ৠ':'ঋ'}
: 'ঽ' will be changed to 'ই' and 'ৠ' will be changed to 'ঋ'. All other legacy symbols will be removed
'''
if legacy_maps=="default":
legacy_maps=languages["bangla"].legacy_maps
self.complex_roots=languages["bangla"].complex_roots
super(Normalizer,self).__init__(language="bangla",
allow_english=allow_english,
keep_legacy_symbols=keep_legacy_symbols,
legacy_maps=legacy_maps)
#-------------------------------------------------extended ops----------------------
        # assamese
self.assamese_map={'ৰ':'র','ৱ':'ব'}
self.word_level_ops["AssameseReplacement"] = self.replaceAssamese
# to+hosonto case
'''
case-1: if 'ত'+hosonto is followed by anything other than a consonant the word is an invalid word
case-2: The ত্ symbol which should be replaced by a 'ৎ' occurs for all consonants except:ত,থ,ন,ব,ম,য,র
# code to verify this manually
for c in self.consonants:
print('ত'+ self.lang.connector+c)
'''
self.valid_consonants_after_to_and_hosonto = ['ত','থ','ন','ব','ম','য','র']
self.decomp_level_ops["ToAndHosontoNormalize"] = self.normalizeToandHosonto
# invalid folas
self.decomp_level_ops["NormalizeConjunctsDiacritics"] = self.cleanInvalidConjunctDiacritics
# complex root cleanup
self.decomp_level_ops["ComplexRootNormalization"] = self.convertComplexRoots
#-------------------------word ops-----------------------------------------------------------------------------
def replaceAssamese(self):
self.replaceMaps(self.assamese_map)
#-------------------------unicode ops-----------------------------------------------------------------------------
def cleanConsonantDiacritics(self):
# consonant diacritics
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-1:
if d in self.lang.consonant_diacritics and self.decomp[idx+1] in self.lang.consonant_diacritics:
# if they are same delete the current one
if d==self.decomp[idx+1]:
self.decomp[idx]=None
elif d in ['ং', 'ঃ'] and self.decomp[idx+1]=='ঁ':
self.swapIdxs(idx,idx+1)
elif d=='ং' and self.decomp[idx+1]== 'ঃ':
self.decomp[idx+1]=None
elif d=='ঃ' and self.decomp[idx+1]== 'ং':
self.decomp[idx+1]=None
def fixNoSpaceChar(self):
# replace
for idx,d in enumerate(self.decomp):
if idx==0 and self.decomp[idx] in ["\u200c","\u200d"]:
self.decomp[idx]=None
else:
if self.decomp[idx]=="\u200c":
self.decomp[idx]="\u200d"
self.decomp=[x for x in self.decomp if x is not None]
# strict
for idx,d in enumerate(self.decomp):
if idx>0:
if self.decomp[idx]=="\u200d":
if self.decomp[idx-1]==self.lang.connector:
self.decomp[idx]=None
self.decomp[idx-1]=None
elif self.decomp[idx-1]!='র':
self.decomp[idx]=None
else:
self.decomp[idx-1]+=self.decomp[idx]
self.decomp[idx]=None
##------------------------------------------------------------------------------------------------------
def cleanInvalidConnector(self):
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-1:
if d==self.lang.connector and self.decomp[idx+1]!="য" and self.decomp[idx-1]!='অ': # exception
if self.decomp[idx-1] in self.lang.invalid_connectors or self.decomp[idx+1] in self.lang.invalid_connectors:
self.decomp[idx]=None
# handle exception
self.decomp=[d for d in self.decomp if d is not None]
word="".join(self.decomp)
if 'অ্য' in word:
word=word.replace('অ্য',"অ্যা")
self.decomp=[ch for ch in word]
def convertToAndHosonto(self):
'''
normalizes to+hosonto for ['ত','থ','ন','ব','ম','য','র']
# Example-1:
(a)বুত্পত্তি==(b)বুৎপত্তি-->False
(a) breaks as ['ব', 'ু', 'ত', '্', 'প', 'ত', '্', 'ত', 'ি']
(b) breaks as ['ব', 'ু', 'ৎ', 'প', 'ত', '্', 'ত', 'ি']
# Example-2:
(a)উত্স==(b)উৎস-->False
(a) breaks as ['উ', 'ত', '্', 'স']
(b) breaks as ['উ', 'ৎ', 'স']
'''
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-1:
# to + hosonto
if d=='ত' and self.decomp[idx+1]== self.lang.connector:
# for single case
if idx<len(self.decomp)-2:
if self.decomp[idx+2] not in self.valid_consonants_after_to_and_hosonto:
# replace
self.decomp[idx]='ৎ'
# delete
self.decomp[idx+1]=None
else:
# valid replacement for to+hos double case
if idx<len(self.decomp)-3:
if self.decomp[idx+2]=='ত' and self.decomp[idx+3]== self.lang.connector:
if idx<len(self.decomp)-4:
if self.decomp[idx+4] not in ['ব','য','র']:
                                    # if the next character after the double to+hos+to+hos is within ['ত','থ','ন','ম']
# replace
self.decomp[idx]='ৎ'
# delete
self.decomp[idx+1]=None
if idx<len(self.decomp)-4:
if self.decomp[idx+4]=='র':
# delete
self.decomp[idx+3]=None
def swapToAndHosontoDiacritics(self):
'''
puts diacritics in right place
'''
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-1:
if d=='ৎ' and self.decomp[idx+1] in self.lang.diacritics:
self.swapIdxs(idx,idx+1)
###------------------------------------------------------------------------------------------------------
def normalizeToandHosonto(self):
self.safeop(self.convertToAndHosonto)
self.safeop(self.swapToAndHosontoDiacritics)
self.baseCompose()
###------------------------------------------------------------------------------------------------------
##------------------------------------------------------------------------------------------------------
def cleanVowelDiacriticComingAfterVowel(self):
'''
takes care of vowels and modifier followed by vowel diacritics
# Example-1:
(a)উুলু==(b)উলু-->False
(a) breaks as ['উ', 'ু', 'ল', 'ু']
(b) breaks as ['উ', 'ল', 'ু']
# Example-2:
(a)আর্কিওোলজি==(b)আর্কিওলজি-->False
(a) breaks as ['আ', 'র', '্', 'ক', 'ি', 'ও', 'ো', 'ল', 'জ', 'ি']
(b) breaks as ['আ', 'র', '্', 'ক', 'ি', 'ও', 'ল', 'জ', 'ি']
Also Normalizes 'এ' and 'ত্র'
# Example-1:
(a)একএে==(b)একত্রে-->False
(a) breaks as ['এ', 'ক', 'এ', 'ে']
(b) breaks as ['এ', 'ক', 'ত', '্', 'র', 'ে']
'''
for idx,d in enumerate(self.decomp):
# if the current one is a VD and the previous char is a vowel
if d in self.lang.vowel_diacritics and self.decomp[idx-1] in self.lang.vowels:
# if the vowel is not 'এ'
if self.decomp[idx-1] !='এ':
# remove diacritic
self.decomp[idx]=None
# normalization case
else:
self.decomp[idx-1]='ত'+'্'+'র'
##------------------------------------------------------------------------------------------------------
def fixTypoForJoFola(self):
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-1:
if d== self.lang.connector and self.decomp[idx+1]=='য়':
self.decomp[idx+1]='য'
def cleanDoubleCC(self):
# c,cc,c,cc
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-3:
if d== self.lang.connector and self.decomp[idx+1] in self.lang.consonants \
and self.decomp[idx+2]==self.lang.connector and self.decomp[idx+3] in self.lang.consonants:
if self.decomp[idx+3]==self.decomp[idx+1]:
self.decomp[idx]=None
self.decomp[idx+1]=None
def cleanDoubleRef(self):
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-3:
if d=='র' and self.decomp[idx+1]==self.lang.connector\
and self.decomp[idx+2]=='র' and self.decomp[idx+3]== self.lang.connector:
self.decomp[idx]=None
self.decomp[idx+1]=None
def fixRefOrder(self):
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-3:
if d=='র' and self.decomp[idx-1]==self.lang.connector\
and self.decomp[idx+2]=='র' and self.decomp[idx+3]== self.lang.connector:
self.decomp[idx]=None
self.decomp[idx+1]=None
def fixOrdersForCC(self):
self.constructComplexDecomp()
for idx,d in enumerate(self.decomp):
if self.lang.connector in d:
if d[0]=='র' and d[1]==self.lang.connector:
start=['র',self.lang.connector]
d=d[2:]
else:
start=[]
curr=[c for c in d if c!=self.lang.connector]
# recreate order
order= ['ব','র','য']
order=[k for k in order if k in curr]
order=[c for c in curr if c not in order]+order
# sort
curr=sorted(curr,key=order.index)
new=[]
for i in range(len(curr)):
new.append(curr[i])
new.append(self.lang.connector)
new=new[:-1]
self.decomp[idx]="".join(start+new)
def cleanConnectotForJoFola(self):
for idx,d in enumerate(self.decomp):
if idx<len(self.decomp)-2:
if d== self.lang.connector and self.decomp[idx+1]=='য' and self.decomp[idx+2]==self.lang.connector:
self.decomp[idx+2]=None
def cleanInvalidConjunctDiacritics(self):
'''
cleans repeated folas
# Example-1:
(a)গ্র্রামকে==(b)গ্রামকে-->False
(a) breaks as ['গ', '্', 'র', '্', 'র', 'া', 'ম', 'ক', 'ে']
(b) breaks as ['গ', '্', 'র', 'া', 'ম', 'ক', 'ে']
'''
self.safeop(self.fixTypoForJoFola)
self.safeop(self.cleanDoubleCC)
self.safeop(self.cleanDoubleRef)
self.safeop(self.fixRefOrder)
self.safeop(self.fixOrdersForCC)
self.safeop(self.cleanConnectotForJoFola)
self.baseCompose()
##------------------------------------------------------------------------------------------------------
def checkComplexRoot(self,root):
formed=[]
formed_idx=[]
for i,c in enumerate(root):
if c !='্' and i not in formed_idx:
r=c
if i==len(root)-1:
formed.append(r)
continue
for j in range(i+2,len(root),2):
d=root[j]
k=r+'্'+d
#if k==
if k not in self.complex_roots:
formed.append(r)
break
else:
if j!=len(root)-1:
r=k
formed_idx.append(j)
else:
r=k
formed_idx.append(j)
formed.append(k)
return "".join(formed)
def convertComplexRoots(self):
self.fixNoSpaceChar()
self.decomp=[x for x in self.decomp if x is not None]
self.constructComplexDecomp()
for idx,d in enumerate(self.decomp):
if d not in self.complex_roots and self.lang.connector in d:
self.decomp[idx]=self.checkComplexRoot(d)
#-------------------------unicode ops-----------------------------------------------------------------------------
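#-------------------------------------------
# usage sketch (illustrative only, not part of the library): the call
# interface comes from BaseNormalizer, which is assumed here to expose the
# normalizer as a callable on a single word; the import path below is an
# assumption as well.
#-------------------------------------------
# from bnunicodenormalizer import Normalizer
# norm = Normalizer(allow_english=True, legacy_maps="default")
# norm("আর্কিওোলজি")   # stray vowel diacritic after 'ও' is removed,
#                      # see cleanVowelDiacriticComingAfterVowel above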
|
## Method 1: Iterative - stack
## Method 2: Recursive - in-built stack
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
## Iteration
nodeStack = []
for num in nums:
node = TreeNode(num)
while nodeStack and num > nodeStack[-1].val:
node.left = nodeStack.pop()
if nodeStack:
nodeStack[-1].right = node
nodeStack.append(node)
return nodeStack[0]
## Recursion
# def max_index(nums, l, r) -> int:
# max_i = l
# for i in range(l, r):
# if nums[max_i] < nums[i]:
# max_i = i
# return max_i
# def construct(nums, l, r) -> TreeNode:
# if l==r:
# return None
# max_i = max_index(nums, l, r)
# root = TreeNode(nums[max_i])
# root.left = construct(nums, l, max_i)
# root.right = construct(nums, max_i+1, r)
# return root
# return construct(nums, l=0, r=len(nums))
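        ## Worked example (illustrative): nums = [3, 2, 1, 6, 0, 5]
        ## 6 is the maximum, so it becomes the root; [3, 2, 1] forms the left
        ## subtree (3, with 2 as its right child and 1 as 2's right child) and
        ## [0, 5] forms the right subtree (5, with 0 as its left child). The
        ## monotonic stack above builds exactly this tree in a single pass.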
|
from django.apps import AppConfig
class NotificationsAppConfig(AppConfig):
name = 'gramgram.notifications'
# django-admin startapp notifications
|
from django.contrib import admin
from .models import fb_db
# Register your models here.
admin.site.register(fb_db)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neobolt.direct import connect, Connection
from neobolt.exceptions import ServiceUnavailable
from test.stub.tools import StubTestCase, StubCluster
class ConnectionV1TestCase(StubTestCase):
def test_construction(self):
with StubCluster({9001: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
self.assertIsInstance(cx, Connection)
def test_return_1(self):
with StubCluster({9001: "v1/return_1.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
self.assertEqual([[1]], records)
def test_disconnect_on_run(self):
with StubCluster({9001: "v1/disconnect_on_run.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.sync()
def test_disconnect_on_pull_all(self):
with StubCluster({9001: "v1/disconnect_on_pull_all.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
def test_disconnect_after_init(self):
with StubCluster({9001: "v1/disconnect_after_init.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.sync()
class ConnectionV3TestCase(StubTestCase):
def test_construction(self):
with StubCluster({9001: "v3/empty.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
self.assertIsInstance(cx, Connection)
def test_return_1(self):
with StubCluster({9001: "v3/return_1.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
self.assertEqual([[1]], records)
def test_return_1_in_tx(self):
with StubCluster({9001: "v3/return_1_in_tx.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.begin(on_success=metadata.update)
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.commit(on_success=metadata.update)
cx.sync()
self.assertEqual([[1]], records)
self.assertEqual({"fields": ["x"], "bookmark": "bookmark:1"}, metadata)
def test_begin_with_metadata(self):
with StubCluster({9001: "v3/begin_with_metadata.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.begin(metadata={"mode": "r"}, on_success=metadata.update)
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.commit(on_success=metadata.update)
cx.sync()
self.assertEqual([[1]], records)
self.assertEqual({"fields": ["x"], "bookmark": "bookmark:1"}, metadata)
def test_begin_with_timeout(self):
with StubCluster({9001: "v3/begin_with_timeout.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.begin(timeout=12.34, on_success=metadata.update)
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.commit(on_success=metadata.update)
cx.sync()
self.assertEqual([[1]], records)
self.assertEqual({"fields": ["x"], "bookmark": "bookmark:1"}, metadata)
def test_run_with_bookmarks(self):
with StubCluster({9001: "v3/run_with_bookmarks.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, bookmarks=["foo", "bar"], on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
self.assertEqual([[1]], records)
def test_run_with_metadata(self):
with StubCluster({9001: "v3/run_with_metadata.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, metadata={"mode": "r"}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
self.assertEqual([[1]], records)
def test_run_with_timeout(self):
with StubCluster({9001: "v3/run_with_timeout.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, timeout=12.34, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
self.assertEqual([[1]], records)
def test_disconnect_on_run(self):
with StubCluster({9001: "v3/disconnect_on_run.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.sync()
def test_disconnect_on_pull_all(self):
with StubCluster({9001: "v3/disconnect_on_pull_all.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
records = []
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.pull_all(on_success=metadata.update, on_records=records.extend)
cx.sync()
def test_disconnect_after_init(self):
with StubCluster({9001: "v3/disconnect_after_init.script"}):
address = ("127.0.0.1", 9001)
with connect(address, auth=self.auth_token, encrypted=False) as cx:
with self.assertRaises(ServiceUnavailable):
metadata = {}
cx.run("RETURN $x", {"x": 1}, on_success=metadata.update)
cx.sync()
|
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from typing import List
class GoogleRoleServiceAccountUser(BaseResourceCheck):
def __init__(self):
name = "Ensure that IAM users are not assigned the Service Account User or Service Account Token Creator roles" \
" at project level"
id = "CKV_GCP_41"
supported_resources = ['google_project_iam_binding', 'google_project_iam_member']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
if 'role' in conf.keys() and \
conf['role'][0] not in ['roles/iam.serviceAccountUser', 'roles/iam.serviceAccountTokenCreator']:
return CheckResult.PASSED
return CheckResult.FAILED
def get_evaluated_keys(self) -> List[str]:
return ['role']
check = GoogleRoleServiceAccountUser()
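# Illustrative example (not part of checkov): a Terraform resource this check
# would flag as FAILED, shown here only as a comment.
#
#   resource "google_project_iam_member" "example" {
#     project = "my-project"                       # hypothetical project id
#     role    = "roles/iam.serviceAccountUser"
#     member  = "user:jane@example.com"
#   }
#
# Any other role value (e.g. "roles/viewer") makes scan_resource_conf return
# CheckResult.PASSED.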
|
import argparse
import datetime as dt
from posixpath import dirname
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import os
import pickle
from mpldatacursor import datacursor
# Global variables
TICKER_INFO_FILE = 'ticker_info.csv'
TICKER_DATA_PATH = 'sec_dfs'
# A mapping from legend to line for clickable legend
LegToLine = dict()
class Securities:
def __init__(self):
self.ticker_info = self.load_ticker_info()
def load_ticker_info(self):
""" Load the ticker info
"""
dirname = os.path.dirname(__file__)
ticker_info_path = os.path.join(dirname, TICKER_INFO_FILE)
return pd.read_csv(ticker_info_path, index_col='Ticker')
def get_ticker_data_path(self, ticker):
""" Get the file path for ticker data
"""
dirname = os.path.dirname(__file__)
ticker_data_path = os.path.join(
dirname, TICKER_DATA_PATH, '{}.csv'.format(ticker))
return ticker_data_path
def get_ticker_data(self, ticker):
""" Get the data for a ticker
"""
try:
# print('Compiling {}'.format(ticker))
ticker_data_path = self.get_ticker_data_path(ticker)
df = pd.read_csv(ticker_data_path,
index_col='Date', parse_dates=['Date'])
df.rename(columns={'Adj Close': ticker}, inplace=True)
df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)
return df
except Exception as e:
print('Failed to compile {}:\n\t{}'.format(ticker, e))
return None
def get_all_tickers(self):
""" Get all tickers
"""
return self.ticker_info.index.tolist()
def get_tickers_by_group(self, group):
""" Get tickers by group
"""
df = self.ticker_info
return df.loc[df['Group'] == group].index.tolist()
def get_tickers_by_country(self, country):
""" Get tickers by country
"""
df = self.ticker_info
return df.loc[df['Country'] == country].index.tolist()
def get_title(self, ticker):
""" Get title for ticker
"""
title = self.ticker_info.loc[ticker, 'Title']
return title
def load_data(self, tickers):
""" Load data for tickers
"""
main_df = pd.DataFrame()
for ticker in tickers:
df = self.get_ticker_data(ticker)
if df is not None:
if main_df.empty:
main_df = df
else:
main_df = main_df.join(df, how='outer')
return main_df
def update_ticker_data(self, tickers):
""" Update the security data to the current date
"""
for ticker in tickers:
try:
print('Updating {}'.format(ticker))
# Load old data
df_old = pd.read_csv(self.get_ticker_data_path(
ticker), index_col='Date', parse_dates=['Date'])
# Get last date
last_date = df_old.index[-1]
# Download new data
start = last_date
end = dt.datetime.today()
df_new = web.DataReader(ticker, 'yahoo', start, end)
# Append
df = df_old.append(df_new)
# Remove duplicates
# Dropping duplicates using index doesn't work
df = df.reset_index().drop_duplicates(subset='Date').set_index('Date')
# Save
df.to_csv(self.get_ticker_data_path(ticker))
except Exception as e:
print('Failed to update {}:\n\t{}'.format(ticker, e))
def download_ticker_data(self, start, end, tickers, force=False):
""" Download data from yahoo for provided tickers
"""
if not os.path.exists(TICKER_DATA_PATH):
os.makedirs(TICKER_DATA_PATH)
for ticker in tickers:
if not os.path.exists(self.get_ticker_data_path(ticker)) or force:
try:
print('Downloading {}'.format(ticker))
df = web.DataReader(ticker, 'yahoo', start, end)
df.to_csv(self.get_ticker_data_path(ticker))
except Exception as e:
print('Failed to download {}:\n\t{}'.format(ticker, e))
else:
print('Already have {}'.format(ticker))
def process_ticker_data(self, start, end, tickers, resample_string=None):
""" Get subset of resampled data
"""
# Set start and end
start = dt.datetime.strptime(start, '%Y/%m/%d')
end = dt.datetime.strptime(end, '%Y/%m/%d')
# Load the data
df = self.load_data(tickers)
# Get subset of time
df = get_timeframe(df, start, end)
# Resample
if resample_string is not None:
df = df.resample(resample_string).mean()
return df
def output_ticker_data(self, start, end, resample_string, tickers, file_path):
""" Output data to csv
"""
df = self.process_ticker_data(
start=start, end=end, tickers=tickers, resample_string=resample_string)
print(df)
df.to_csv(file_path)
def js_output(self, start, end, resample_string, tickers):
""" Output data for js
"""
df = self.process_ticker_data(
start=start, end=end, tickers=tickers, resample_string=resample_string)
print(df.to_csv())
def plot_data(self, start, end, tickers):
# Set start and end
start = dt.datetime.strptime(start, '%Y/%m/%d')
end = dt.datetime.strptime(end, '%Y/%m/%d')
# Load the data
main_df = self.load_data(tickers)
# Get subset of time
df = get_timeframe(main_df, start, end)
# normalize
df, factors = normalize(df)
# Get last non null indexes
idx = df.apply(pd.Series.last_valid_index)
# Get last non null values
last_values = []
for col in df:
last_value = df[col].loc[idx[col]]
last_values.append(last_value)
# Sort
sort_order = np.argsort(np.array(last_values)*-1)
factors = np.array(factors)[sort_order]
last_values = np.array(last_values)[sort_order]
df = reorder_cols(df, sort_order)
tickers = list(df)
# Get legend
leg = []
titles = [self.get_title(ticker) for ticker in tickers]
for ticker, title, factor, last_value in zip(tickers, titles, factors, last_values):
leg.append('{} ({}): ({:.0f}/{:.0f}) {:.2f}'.format(title,
ticker, last_value*factor, factor, last_value))
# Register plotting converter
pd.plotting.register_matplotlib_converters()
# Plot
plt.plot(df)
plt.xlabel('date')
plt.ylabel('gain')
        plt.grid(True)
plt.legend(leg)
# Shrink current axis by 20%
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
leg = ax.legend(leg, loc='center left',
bbox_to_anchor=(1, 0.5), prop={'size': 8})
# Make the legend clickable
for legline, origline in zip(leg.get_lines(), ax.get_lines()):
legline.set_picker(5) # 5pt tolerance
LegToLine[legline] = origline
# Add data cursors
datacursor(display='multiple', draggable=True)
# Handle pick events
plt.gcf().canvas.mpl_connect('pick_event', onpick)
plt.show()
def normalize(df_in):
# Store normalization factors
factors = []
# Get first non null indexes
idx = df_in.apply(pd.Series.first_valid_index)
# Copy
df = df_in.copy()
# Normalize each column
for col in df:
factor = df[col].loc[idx[col]]
factors.append(factor)
df[col] = df[col]/factor
return df, factors
def reorder_cols(df, order):
""" Reorder columns of a dataframe
"""
# Get the list of columns and reorder them
cols = df.columns.tolist()
cols = np.array(cols)[order]
return df[cols]
def get_timeframe(df, start, end):
""" Get timeframe section from df
"""
# Create mask between time
mask = (df.index > start) & (df.index < end)
# Set output
df_out = df.loc[mask].copy()
# Remove empty columns
df_out.dropna(axis='columns', how='all', inplace=True)
return df_out
def onpick(event):
""" Handle pick event on the legend
"""
# on the click event, find the orig line corresponding to the legend line, and toggle visibility
legline = event.artist
if legline in LegToLine:
origline = LegToLine[legline]
else:
return
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
# Redraw the figure
plt.gcf().canvas.draw()
def parse_args():
""" Parse arguments for program
"""
parser = argparse.ArgumentParser(description="Program for analyzing stock")
subparsers = parser.add_subparsers(dest='sub_cmd', required=True)
plot_parser = subparsers.add_parser('plot', help='plot the stock data')
plot_parser.add_argument(
'-s', '--start', default='2021/1/1', help="start date in the format YYYY/mm/dd")
plot_parser.add_argument('-e', '--end', default=dt.datetime.today().strftime(
"%Y/%m/%d"), help="end date in the format YYYY/mm/dd")
plot_parser.add_argument(
'-t', '--tickers', nargs='*', help='List of tickers')
plot_parser.add_argument('-g', '--group', help='Group')
plot_parser.add_argument('-c', '--country', help='Country')
plot_parser.set_defaults(func='plot')
output_parser = subparsers.add_parser(
'output', help='output the stock data')
output_parser.add_argument(
'-s', '--start', default='2018/1/1', help="start date in the format YYYY/mm/dd")
output_parser.add_argument('-e', '--end', default=dt.datetime.today().strftime(
"%Y/%m/%d"), help="end date in the format YYYY/mm/dd")
output_parser.add_argument(
'-t', '--tickers', nargs='*', help='List of tickers')
output_parser.add_argument(
'-r', '--resample_string', help='Resample string')
output_parser.add_argument(
'-f', '--file_path', default='temp/output.csv', help='Output file path')
output_parser.add_argument('-g', '--group', help='Group')
output_parser.add_argument('-c', '--country', help='Country')
output_parser.set_defaults(func='output')
update_parser = subparsers.add_parser(
'update', help='update the stock data')
update_parser.add_argument(
'-t', '--tickers', nargs='*', help='List of tickers')
update_parser.add_argument('-g', '--group', help='Group')
update_parser.add_argument('-c', '--country', help='Country')
update_parser.set_defaults(func='update')
download_parser = subparsers.add_parser(
'download', help='download the stock data')
download_parser.add_argument(
'-s', '--start', default='2000/1/1', help="start date in the format YYYY/mm/dd")
download_parser.add_argument('-e', '--end', default=dt.datetime.today(
).strftime("%Y/%m/%d"), help="end date in the format YYYY/mm/dd")
download_parser.add_argument(
'-f', '--force', action='store_true', help="force download")
download_parser.add_argument(
'-t', '--tickers', nargs='*', help='List of tickers')
download_parser.add_argument('-g', '--group', help='Group')
download_parser.add_argument('-c', '--country', help='Country')
download_parser.set_defaults(func='download')
js_output_parser = subparsers.add_parser(
'js_output', help='output for use in js')
js_output_parser.add_argument(
'-s', '--start', default='2018/1/1', help="start date in the format YYYY/mm/dd")
js_output_parser.add_argument('-e', '--end', default=dt.datetime.today(
).strftime("%Y/%m/%d"), help="end date in the format YYYY/mm/dd")
js_output_parser.add_argument(
'-t', '--tickers', nargs='*', help='List of tickers')
js_output_parser.add_argument(
'-r', '--resample_string', help='Resample string')
js_output_parser.add_argument('-g', '--group', help='Group')
js_output_parser.add_argument('-c', '--country', help='Country')
js_output_parser.set_defaults(func='js_output')
return parser.parse_args()
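# Example invocations (illustrative; the script name, group name and ticker
# symbols below are assumptions, not defined in this file):
#   python securities.py download -s 2015/1/1 -t AAPL MSFT
#   python securities.py update -g Tech
#   python securities.py plot -s 2021/1/1 -e 2021/12/31 -t AAPL MSFT
#   python securities.py output -r M -f temp/output.csv -c US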
if __name__ == "__main__":
# Initialize securities object
secs = Securities()
args = parse_args()
# Select tickers
if args.group:
tickers = secs.get_tickers_by_group(args.group)
elif args.country:
tickers = secs.get_tickers_by_country(args.country)
elif args.tickers:
tickers = args.tickers
else:
tickers = secs.get_all_tickers()
if args.func == 'plot':
secs.plot_data(start=args.start, end=args.end, tickers=tickers)
if args.func == 'update':
secs.update_ticker_data(tickers=tickers)
if args.func == 'download':
secs.download_ticker_data(
start=args.start, end=args.end, tickers=tickers, force=args.force)
if args.func == 'output':
if not args.resample_string:
resample_string = None
else:
resample_string = args.resample_string
secs.output_ticker_data(start=args.start, end=args.end,
resample_string=resample_string, tickers=tickers, file_path=args.file_path)
if args.func == 'js_output':
if not args.resample_string:
resample_string = None
else:
resample_string = args.resample_string
secs.js_output(start=args.start, end=args.end,
resample_string=resample_string, tickers=tickers)
|
import re
from itertools import chain
from typing import List
import click
from mach import types
from mach.__version__ import __version__
from mach.exceptions import ValidationError
STORE_KEY_RE = re.compile(r"^[\w_-]*$")
def validate_config(config: types.MachConfig, *, ignore_version=True):
"""Check the config for invalid configuration."""
if not ignore_version and config.mach_composer.version != __version__:
raise ValidationError(
f"MACH composer version defined in configuration ({config.mach_composer.version}) "
f"does not match current version {__version__}"
)
validate_general_config(config.general_config)
validate_components(config)
for site in config.sites:
validate_site(site, config=config)
def validate_general_config(config: types.GlobalConfig):
if config.cloud == types.CloudOption.AZURE:
if not config.azure:
raise ValidationError("Missing azure configuration")
if not config.terraform_config.azure_remote_state:
raise ValidationError("Missing azure_remote_state configuration")
if config.terraform_config.aws_remote_state:
raise ValidationError(
"Found aws_remote_state configuration, while cloud is set to 'azure'"
)
elif config.cloud == types.CloudOption.AWS:
if not config.terraform_config.aws_remote_state:
raise ValidationError("Missing aws_remote_state configuration")
if config.terraform_config.azure_remote_state:
raise ValidationError(
"Found azure_remote_state configuration, while cloud is set to 'aws'"
)
if config.terraform_config:
validate_terraform_config(config.terraform_config)
if config.sentry:
validate_sentry_config(config.sentry)
def validate_terraform_config(config: types.TerraformConfig):
if config.providers:
click.secho("Terraform provider versions", bold=True, fg="yellow")
click.secho(
"\n".join(
[
"You are using custom Terraform provider versions.",
"Please be aware that some unexpected changes might occur compared to the MACH defaults.", # noqa
]
),
fg="yellow",
)
def validate_site(site: types.Site, *, config: types.MachConfig):
if config.general_config.cloud == types.CloudOption.AWS and not site.aws:
raise ValidationError(f"Site {site.identifier} is missing an aws configuration")
if config.general_config.cloud == types.CloudOption.AZURE:
if site.endpoints and not site.azure.frontdoor:
raise ValidationError(
f"Site {site.identifier} has endpoints defined but frontdoor is not configured. "
"Make sure your azure.frontdoor block is correcty configured in either the global "
"or site configuration."
)
validate_endpoints(site, config.general_config.cloud)
validate_commercetools(site)
if site.components:
validate_site_components(site.components, site=site)
def validate_endpoints(site: types.Site, cloud: types.CloudOption):
# Construct lookup dictionary of all endpoints with the components that use them
expected_endpoint_names = set(
chain.from_iterable(c.endpoints.values() for c in site.components)
)
endpoint_names = {e.key for e in site.endpoints}
missing = expected_endpoint_names - endpoint_names
if missing:
raise ValidationError(f"Missing required endpoints {', '.join(missing)}")
def validate_site_components(components: List[types.Component], *, site: types.Site):
"""Sanity checks on component configuration per site."""
defined_stores = (
[s.key for s in site.commercetools.stores] if site.commercetools else []
)
for component in components:
if component.health_check_path and not component.health_check_path.startswith(
"/"
):
raise ValidationError(
f"Component health check {component.health_check_path} does "
"not start with '/'."
)
for store in component.store_variables.keys():
if store not in defined_stores:
raise ValidationError(
f"Store {store} is not defined in your commercetools stores definition"
)
if site.azure:
if len(component.endpoints) > 1:
raise ValidationError(
f"The '{component.name}' component has multiple endpoints defined. "
"This is not supported on Azure yet.\n"
"See https://github.com/labd/mach-composer/issues/64 for more information."
)
service_plans = site.azure.service_plans
if (
component.azure
and component.azure.service_plan
and component.azure.service_plan not in service_plans
):
raise ValidationError(
f"Component {component.name} requires service plan "
f"{component.azure.service_plan} which is not defined in the "
"Azure configuration."
)
def validate_commercetools(site: types.Site):
if site.commercetools:
validate_store_keys(site.commercetools)
def validate_store_keys(ct_settings: types.CommercetoolsSettings):
"""Sanity checks on store values."""
if ct_settings.stores:
store_keys = [store.key for store in ct_settings.stores]
for key in store_keys:
if len(key) < 2:
raise ValidationError(
f"Store key {key} should be minimum two characters."
)
if store_keys.count(key) != 1:
raise ValidationError(f"Store key {key} must be unique.")
if not STORE_KEY_RE.match(key):
raise ValidationError(
f"Store key {key} may only contain alphanumeric characters or underscores"
)
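# For illustration: store keys such as "main_store" or "eu_west" satisfy these
# rules, while "a" (shorter than two characters) or "store!" (characters
# outside STORE_KEY_RE) raise a ValidationError.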
def validate_components(config: types.MachConfig):
"""Validate global component data is valid."""
if config.general_config.cloud == types.CloudOption.AWS:
validate_aws_components(config)
elif config.general_config.cloud == types.CloudOption.AZURE:
validate_azure_components(config)
def validate_sentry_config(config: types.SentryConfig):
if not any([config.dsn, config.auth_token]):
raise ValidationError("sentry: Either dsn or auth_token should be set")
if all([config.dsn, config.auth_token]):
raise ValidationError("sentry: Only a dsn or auth_token should be defined")
if config.auth_token and not any([config.project, config.organization]):
raise ValidationError(
"sentry: A project and organization should be defined when using an auth_token"
)
def validate_aws_components(config: types.MachConfig):
"""Validate components specifically for AWS usage."""
pass
def validate_azure_components(config: types.MachConfig):
"""Validate components specifically for Azure usage.
    The only requirement for now is that a correct short_name is set.
    Otherwise problems will arise when creating the Azure resources since,
    for example, Storage Account names have a limited length.
"""
for comp in config.components:
if comp.artifacts:
raise ValidationError(
f"The artifacts options on the '{comp.name}' component "
"are not supported when targetting Azure"
)
if "azure" not in comp.integrations:
continue
# azure naming length is limited, so verify it doesn't get too long.
assert comp.azure and comp.azure.short_name # Should have been set by parser
if len(comp.azure.short_name) > 10:
raise ValidationError(
f"Component {comp.name} short_name '{comp.azure.short_name}' "
"cannot be more than 10 characters."
)
|
import time
def sample(a, b):
"""
Test case 1
"""
for _ in range(2):
print("yes")
x = 8
y = 14
x = a + b
y = x * 2
print('Math test: ' + str(y))
fl = 1.234
a_string = "this is a string"
print(a_string)
num_list = []
num_list = [500, 600, 700]
num_list = [100, 200, 700, 800]
num_list.remove(200)
dict_test = dict()
dict_test["one"] = 2
dict_test = {"one": 1, "two": 2}
print("Test 2, test 3, Test the test?")
dict_test = {"one": {"yes": "yes"}, "two": 2} # text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test text wrapping test
dict_test["one"]["yes"] = 0
friends = ['john', 'pat', 'gary', 'michael']
for i, name in enumerate(friends):
print("iteration {iteration} is {name}".format(iteration=i, name=name))
def nested_loop(a):
"""
Test case 2
"""
test_a = a
num_list = [500, 600, 700]
alpha_list = ['x', 'y', 'z']
for number in num_list:
print(number)
time.sleep(1)
for letter in alpha_list:
print(letter)
time.sleep(3.4)
|
from __future__ import absolute_import
import unittest
class TestCase(unittest.TestCase):
def __call__(self, result=None):
"""Run a test without having to call super in setUp and tearDown."""
self._pre_setup()
unittest.TestCase.__call__(self, result)
self._post_teardown()
def _pre_setup(self):
pass
def _post_teardown(self):
pass
def assertCountEqual(self, *args, **kwargs):
        # Python 3 renamed assertItemsEqual to assertCountEqual; fall back for Python 2
try:
return unittest.TestCase.assertCountEqual(self, *args, **kwargs)
except AttributeError:
return self.assertItemsEqual(*args, **kwargs)
|
"""
Author: Justin Cappos
Start Date: July 1st, 2008
Description:
Handles exiting and killing all threads, tracking CPU / Mem usage, etc.
"""
import threading
import os
import time
# needed for sys.stderr and windows Popen hackery
import sys
# needed for signal numbers
import signal
# needed for harshexit
import harshexit
# print useful info when exiting...
import tracebackrepy
# used to query status, etc.
# This may fail on Windows CE
try:
import subprocess
mobile_no_subprocess = False
except ImportError:
# Set flag to avoid using subprocess
mobile_no_subprocess = True
try:
import android
IS_ANDROID = True
except ImportError:
IS_ANDROID = False
# used for socket.error
import socket
# need for status retrieval
import statusstorage
# Get constants
import repy_constants
# Get access to the status interface so we can start it
import nmstatusinterface
# This allows us to meter resource use
import nanny
# This is used for IPC
import marshal
# This will fail on non-windows systems
try:
import windows_api as windows_api
except:
windows_api = None
# Armon: This is a place holder for the module that will be imported later
os_api = None
# Armon: See additional imports at the bottom of the file
class UnsupportedSystemException(Exception):
pass
################### Publicly visible functions #######################
# check the disk space used by a dir.
def compute_disk_use(dirname):
# Convert path to absolute
dirname = os.path.abspath(dirname)
diskused = 0
for filename in os.listdir(dirname):
try:
diskused = diskused + os.path.getsize(os.path.join(dirname, filename))
except IOError: # They likely deleted the file in the meantime...
pass
except OSError: # They likely deleted the file in the meantime...
pass
# charge an extra 4K for each file to prevent lots of little files from
    # using up the disk. I'm doing this outside of the except clause in case
    # the failure to get the size wasn't related to deletion
diskused = diskused + 4096
return diskused
# prepare a socket so it behaves how we want
def preparesocket(socketobject):
if ostype == 'Windows':
# we need to set a timeout because on rare occasions Windows will block
# on recvmess with a bad socket. This prevents it from locking the system.
# We use select, so the timeout should never be actually used.
# The actual value doesn't seem to matter, so I'll use 100 years
socketobject.settimeout(60*60*24*365*100)
elif ostype == 'Linux' or ostype == 'Darwin':
# Linux seems not to care if we set the timeout, Mac goes nuts and refuses
# to let you send from a socket you're receiving on (why?)
pass
else:
raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")"
# Armon: Also launches the nmstatusinterface thread.
# This will result in an internal thread on Windows
# and a thread on the external process for *NIX
def monitor_cpu_disk_and_mem():
if IS_ANDROID:
do_forked_resource_monitor_android()
elif ostype == 'Linux' or ostype == 'Darwin':
# Startup a CPU monitoring thread/process
do_forked_resource_monitor()
elif ostype == 'Windows':
# Now we set up a cpu nanny...
WinCPUNannyThread().start()
# Launch mem./disk resource nanny
WindowsNannyThread().start()
# Start the nmstatusinterface. Windows means repy isn't run in an external
# process, so pass None instead of a process id.
nmstatusinterface.launch(None)
else:
raise UnsupportedSystemException, "Unsupported system type: '"+osrealtype+"' (alias: "+ostype+")"
# Elapsed time
elapsedtime = 0
# Store the uptime of the system when we first get loaded
starttime = 0
last_uptime = 0
# Timestamp from our starting point
last_timestamp = time.time()
# This is our uptime granularity
granularity = 1
# This ensures only one thread calling getruntime at any given time
runtimelock = threading.Lock()
def getruntime():
"""
<Purpose>
Return the amount of time the program has been running. This is in
wall clock time. This function is not guaranteed to always return
increasing values due to NTP, etc.
<Arguments>
None
<Exceptions>
None.
<Side Effects>
None
<Remarks>
By default this will have the same granularity as the system clock. However, if time
goes backward due to NTP or other issues, getruntime falls back to system uptime.
This has much lower granularity, and varies by each system.
<Returns>
The elapsed time as float
"""
global starttime, last_uptime, last_timestamp, elapsedtime, granularity, runtimelock
# Get the lock
runtimelock.acquire()
# Check if Linux or BSD/Mac
if ostype in ["Linux", "Darwin"]:
uptime = os_api.get_system_uptime()
# Check if time is going backward
if uptime < last_uptime:
# If the difference is less than 1 second, that is okay, since
# The boot time is only precise to 1 second
if (last_uptime - uptime) > 1:
raise EnvironmentError, "Uptime is going backwards!"
else:
# Use the last uptime
uptime = last_uptime
# No change in uptime
diff_uptime = 0
else:
# Current uptime, minus the last uptime
diff_uptime = uptime - last_uptime
# Update last uptime
last_uptime = uptime
# Check for windows
elif ostype in ["Windows"]:
# Release the lock
runtimelock.release()
# Time.clock returns elapsedtime since the first call to it, so this works for us
return time.clock()
# Who knows...
else:
raise EnvironmentError, "Unsupported Platform!"
# Current uptime minus start time
runtime = uptime - starttime
# Get runtime from time.time
current_time = time.time()
# Current time, minus the last time
diff_time = current_time - last_timestamp
# Update the last_timestamp
last_timestamp = current_time
# Is time going backward?
if diff_time < 0.0:
# Add in the change in uptime
elapsedtime += diff_uptime
# Lets check if time.time is too skewed
else:
skew = abs(elapsedtime + diff_time - runtime)
# If the skew is too great, use uptime instead of time.time()
if skew < granularity:
elapsedtime += diff_time
else:
elapsedtime += diff_uptime
# Release the lock
runtimelock.release()
# Return the new elapsedtime
return elapsedtime
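# Worked example of the skew handling above (numbers are illustrative): if
# time.time() jumps backward 5 seconds between calls (diff_time = -5.0) while
# the system uptime advanced 1 second, elapsedtime grows by diff_uptime
# (1 second) instead of shrinking, so getruntime() does not run backwards.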
# This lock is used to serialize calls to get_resources
get_resources_lock = threading.Lock()
# Cache the disk used from the external process
cached_disk_used = 0L
# This array holds the times that repy was stopped.
# It is an array of tuples, of the form (time, amount)
# where time is when repy was stopped (from getruntime()) and amount
# is the stop time in seconds. The last process_stopped_max_entries are retained
process_stopped_timeline = []
process_stopped_max_entries = 100
# Method to expose resource limits and usage
def get_resources():
"""
<Purpose>
Returns the resource utilization limits as well
as the current resource utilization.
<Arguments>
None.
<Returns>
A tuple of dictionaries and an array (limits, usage, stoptimes).
Limits is the dictionary which maps the resource name
to its maximum limit.
Usage is the dictionary which maps the resource name
to its current usage.
Stoptimes is an array of tuples with the times which the Repy process
was stopped and for how long, due to CPU over-use.
Each entry in the array is a tuple (TOS, Sleep Time) where TOS is the
time of stop (respective to getruntime()) and Sleep Time is how long the
repy process was suspended.
The stop times array holds a fixed number of the last stop times.
Currently, it holds the last 100 stop times.
"""
# Acquire the lock...
get_resources_lock.acquire()
# ...but always release it
try:
# Construct the dictionaries as copies from nanny
(limits,usage) = nanny.get_resource_information()
    # Calculate all the usages
pid = os.getpid()
# Get CPU and memory, this is thread specific
if ostype in ["Linux", "Darwin"]:
# Get CPU first, then memory
usage["cpu"] = os_api.get_process_cpu_time(pid)
# This uses the cached PID data from the CPU check
usage["memory"] = os_api.get_process_rss()
# Get the thread specific CPU usage
usage["threadcpu"] = os_api.get_current_thread_cpu_time()
# Windows Specific versions
elif ostype in ["Windows"]:
# Get the CPU time
usage["cpu"] = windows_api.get_process_cpu_time(pid)
# Get the memory, use the resident set size
usage["memory"] = windows_api.process_memory_info(pid)['WorkingSetSize']
# Get thread-level CPU
usage["threadcpu"] = windows_api.get_current_thread_cpu_time()
# Unknown OS
else:
raise EnvironmentError("Unsupported Platform!")
# Use the cached disk used amount
usage["diskused"] = cached_disk_used
finally:
# Release the lock
get_resources_lock.release()
# Copy the stop times
stoptimes = process_stopped_timeline[:]
# Return the dictionaries and the stoptimes
return (limits,usage,stoptimes)
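# Shape of the value returned by get_resources() (numbers are illustrative):
#   limits    = {"cpu": 0.10, "memory": 15000000, "diskused": 10000000, ...}
#   usage     = {"cpu": 0.02, "memory": 4200000, "threadcpu": 0.01, "diskused": 8192, ...}
#   stoptimes = [(12.5, 0.3), (14.1, 0.2)]   # (time of stop, seconds suspended)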
################### Windows specific functions #######################
class WindowsNannyThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self,name="NannyThread")
def run(self):
# How often the memory will be checked (seconds)
memory_check_interval = repy_constants.CPU_POLLING_FREQ_WIN
# The ratio of the disk polling time to memory polling time.
disk_to_memory_ratio = int(repy_constants.DISK_POLLING_HDD / memory_check_interval)
# Which cycle number we're on
counter = 0
# Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem
windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
# need my pid to get a process handle...
mypid = os.getpid()
# run forever (only exit if an error occurs)
while True:
try:
# Increment the interval counter
counter += 1
# Check memory use, get the WorkingSetSize or RSS
memused = windows_api.process_memory_info(mypid)['WorkingSetSize']
if memused > nanny.get_resource_limit("memory"):
# We will be killed by the other thread...
raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'"
# Check if we should check the disk
if (counter % disk_to_memory_ratio) == 0:
# Check diskused
diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
if diskused > nanny.get_resource_limit("diskused"):
raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'"
# Sleep until the next iteration of checking the memory
time.sleep(memory_check_interval)
except windows_api.DeadProcess:
# Process may be dead, or die while checking memory use
# In any case, there is no reason to continue running, just exit
harshexit.harshexit(99)
except:
tracebackrepy.handle_exception()
print >> sys.stderr, "Nanny died! Trying to kill everything else"
harshexit.harshexit(20)
# Windows specific CPU Nanny Stuff
winlastcpuinfo = [0,0]
# Enforces CPU limit on Windows and Windows CE
def win_check_cpu_use(cpulim, pid):
global winlastcpuinfo
# get use information and time...
now = getruntime()
# Get the total cpu time
usertime = windows_api.get_process_cpu_time(pid)
useinfo = [usertime, now]
# get the previous time and cpu so we can compute the percentage
oldusertime = winlastcpuinfo[0]
oldnow = winlastcpuinfo[1]
if winlastcpuinfo == [0,0]:
winlastcpuinfo = useinfo
# give them a free pass if it's their first time...
return 0
# save this data for next time...
winlastcpuinfo = useinfo
# Get the elapsed time...
elapsedtime = now - oldnow
# This is a problem
if elapsedtime == 0:
return -1 # Error condition
# percent used is the amount of change divided by the time...
percentused = (usertime - oldusertime) / elapsedtime
# Calculate amount of time to sleep for
stoptime = nanny.calculate_cpu_sleep_interval(cpulim, percentused,elapsedtime)
if stoptime > 0.0:
# Try to timeout the process
if windows_api.timeout_process(pid, stoptime):
# Log the stoptime
process_stopped_timeline.append((now, stoptime))
# Drop the first element if the length is greater than the maximum entries
if len(process_stopped_timeline) > process_stopped_max_entries:
process_stopped_timeline.pop(0)
# Return how long we slept so parent knows whether it should sleep
return stoptime
else:
# Process must have been making system call, try again next time
return -1
# If the stop time is 0, then avoid calling timeout_process
else:
return 0.0
# Dedicated Thread for monitoring CPU, this is run as a part of repy
class WinCPUNannyThread(threading.Thread):
# Thread variables
pid = 0 # Process pid
def __init__(self):
self.pid = os.getpid()
threading.Thread.__init__(self,name="CPUNannyThread")
def run(self):
# Elevate our priority, set us to the highest so that we can more effectively throttle
success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST)
# If we failed to get HIGHEST priority, try above normal, else we're still at default
if not success:
windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
# Run while the process is running
while True:
try:
# Get the frequency
frequency = repy_constants.CPU_POLLING_FREQ_WIN
# Base amount of sleeping on return value of
# win_check_cpu_use to prevent under/over sleeping
slept = win_check_cpu_use(nanny.get_resource_limit("cpu"), self.pid)
if slept == -1:
# Something went wrong, try again
pass
elif (slept < frequency):
time.sleep(frequency-slept)
except windows_api.DeadProcess:
# Process may be dead
harshexit.harshexit(97)
except:
tracebackrepy.handle_exception()
print >> sys.stderr, "CPU Nanny died! Trying to kill everything else"
harshexit.harshexit(25)
############## *nix specific functions (may include Mac) ###############
# This method handles messages on the "diskused" channel from
# the external process. When the external process measures disk used,
# it is piped in and cached for calls to getresources.
def IPC_handle_diskused(bytes):
  global cached_disk_used
  cached_disk_used = bytes
# This method handles messages on the "repystopped" channel from
# the external process. When the external process stops repy, it sends
# a tuple with (TOS, amount) where TOS is time of stop (getruntime()) and
# amount is the amount of time execution was suspended.
def IPC_handle_stoptime(info):
# Push this onto the timeline
process_stopped_timeline.append(info)
# Drop the first element if the length is greater than the max
if len(process_stopped_timeline) > process_stopped_max_entries:
process_stopped_timeline.pop(0)
# Use a special class of exception for when
# resource limits are exceeded
class ResourceException(Exception):
pass
# Armon: Method to write a message to the pipe, used for IPC.
# This allows the pipe to be multiplexed by sending simple dictionaries
def write_message_to_pipe(writehandle, channel, data):
"""
<Purpose>
Writes a message to the pipe
<Arguments>
writehandle:
A handle to a pipe which can be written to.
channel:
The channel used to describe the data. Used for multiplexing.
data:
The data to send.
<Exceptions>
As with os.write()
EnvironmentError will be thrown if os.write() sends 0 bytes, indicating the
pipe is broken.
"""
# Construct the dictionary
mesg_dict = {"ch":channel,"d":data}
# Convert to a string
mesg_dict_str = marshal.dumps(mesg_dict)
# Make a full string
mesg = str(len(mesg_dict_str)) + ":" + mesg_dict_str
# Send this
index = 0
while index < len(mesg):
bytes = os.write(writehandle, mesg[index:])
if bytes == 0:
raise EnvironmentError, "Write send 0 bytes! Pipe broken!"
index += bytes
# Armon: Method to read a message from the pipe, used for IPC.
# This allows the pipe to be multiplexed by sending simple dictionaries
def read_message_from_pipe(readhandle):
"""
<Purpose>
Reads a message from a pipe.
<Arguments>
readhandle:
A handle to a pipe which can be read from
<Exceptions>
As with os.read().
EnvironmentError will be thrown if os.read() returns a 0-length string, indicating
the pipe is broken.
<Returns>
A tuple (Channel, Data) where Channel is used to multiplex the pipe.
"""
# Read until we get to a colon
data = ""
index = 0
# Loop until we get a message
while True:
# Read in data if the buffer is empty
if index >= len(data):
# Read 8 bytes at a time
mesg = os.read(readhandle,8)
if len(mesg) == 0:
raise EnvironmentError, "Read returned empty string! Pipe broken!"
data += mesg
# Increment the index while there is data and we have not found a colon
while index < len(data) and data[index] != ":":
index += 1
# Check if we've found a colon
if len(data) > index and data[index] == ":":
# Get the message length
mesg_length = int(data[:index])
# Determine how much more data we need
more_data = mesg_length - len(data) + index + 1
# Read in the rest of the message
while more_data > 0:
mesg = os.read(readhandle, more_data)
if len(mesg) == 0:
raise EnvironmentError, "Read returned empty string! Pipe broken!"
data += mesg
more_data -= len(mesg)
# Done, convert the message to a dict
whole_mesg = data[index+1:]
mesg_dict = marshal.loads(whole_mesg)
# Return a tuple (Channel, Data)
return (mesg_dict["ch"],mesg_dict["d"])
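# Illustrative round trip (not part of the original module): the framing produced by
# write_message_to_pipe is "<length>:<marshal.dumps({'ch': ..., 'd': ...})>", so over a
# local pipe the two helpers invert each other:
#
#   readhandle, writehandle = os.pipe()
#   write_message_to_pipe(writehandle, "diskused", 4096)
#   read_message_from_pipe(readhandle)   # -> ("diskused", 4096)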
# This dictionary defines the functions that handle messages
# on each channel. E.g. when a message arrives on the "repystopped" channel,
# the IPC_handle_stoptime function should be invoked to handle it.
IPC_HANDLER_FUNCTIONS = {"repystopped":IPC_handle_stoptime,
"diskused":IPC_handle_diskused }
# This thread checks that a process is alive and invokes
# delegate methods when messages arrive on the pipe.
class monitor_process_checker(threading.Thread):
def __init__(self, readhandle):
"""
<Purpose>
Terminates harshly if the monitor process dies before we do.
<Arguments>
readhandle: A file descriptor to the handle of a pipe to the monitor
process.
"""
# Name our self
threading.Thread.__init__(self, name="ProcessChecker")
# Store the handle
self.readhandle = readhandle
def run(self):
# Run forever
while True:
# Read a message
try:
mesg = read_message_from_pipe(self.readhandle)
except Exception, e:
break
# Check for a handler function
if mesg[0] in IPC_HANDLER_FUNCTIONS:
# Invoke the handler function with the data
handler = IPC_HANDLER_FUNCTIONS[mesg[0]]
handler(mesg[1])
# Print a message if there is a message on an unknown channel
else:
print "[WARN] Message on unknown channel from monitor process:", mesg[0]
### We only leave the loop on a fatal error, so we need to exit now
# Write out status information; the monitor process would normally do this, but it's dead.
statusstorage.write_status("Terminated")
print >> sys.stderr, "Monitor process died! Terminating!", repr(e)
harshexit.harshexit(70)
# For *NIX systems, there is an external process, and the
# pid for the actual repy process is stored here
repy_process_id = None
# Forks Repy. The child will continue execution, and the parent
# will become a resource monitor
def do_forked_resource_monitor():
global repy_process_id
# Get a pipe
(readhandle, writehandle) = os.pipe()
# I'll fork a copy of myself
childpid = os.fork()
if childpid == 0:
# We are the child, close the write end of the pipe
os.close(writehandle)
# Start a thread to check on the survival of the monitor process
monitor_process_checker(readhandle).start()
return
else:
# We are the parent, close the read end
os.close(readhandle)
# Store the childpid
repy_process_id = childpid
# Start the nmstatusinterface
nmstatusinterface.launch(repy_process_id)
# Small internal error handler function
def _internal_error(message):
try:
print >> sys.stderr, message
sys.stderr.flush()
except:
pass
# Stop the nmstatusinterface, we don't want any more status updates
nmstatusinterface.stop()
# Kill repy
harshexit.portablekill(childpid)
try:
# Write out status information; repy has been terminated
statusstorage.write_status("Terminated")
except:
pass
try:
# Some OS's require that you wait on the pid at least once
# before they do any accounting
(pid, status) = os.waitpid(childpid,os.WNOHANG)
# Launch the resource monitor, if it fails determine why and restart if necessary
resource_monitor(childpid, writehandle)
except ResourceException, exp:
# Repy exceeded its resource limit, kill it
_internal_error(str(exp)+" Impolitely killing child!")
harshexit.harshexit(98)
except Exception, exp:
# There is some general error...
try:
(pid, status) = os.waitpid(childpid,os.WNOHANG)
except:
# This means that the process is dead
pass
# Check if this is repy exiting
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
sys.exit(0)
else:
_internal_error(str(exp)+" Monitor death! Impolitely killing child!")
raise
def do_forked_resource_monitor_android():
repypid = os.getpid()
(readhandle, writehandle) = os.pipe()
os_api.monitored_process_procfs_stat_file = open("/proc/" +
str(repypid) + "/stat")
if os.fork():
# Parent does not need to write to the pipe
os.close(writehandle)
# Start a thread to check on the survival of the monitor process
monitor_process_checker(readhandle).start()
# Go do important stuff (execute repy code)
return
# The child does not need to read from the pipe
os.close(readhandle)
# Start the nmstatusinterface
nmstatusinterface.launch(repypid)
kill_repy = False
error_msg = False
monitor_exit_code = 0
try:
# Launch the resource monitor, if it fails determine why and restart if necessary
resource_monitor(repypid, writehandle)
except ResourceException, exp:
# Repy exceeded its resource limit, kill it
kill_repy = True
error_msg = str(exp) + " Impolitely killing repy process!"
# XXX LP: Why 98? Not defined in harshexit or elsewhere...
monitor_exit_code = 98
except IOError, e:
# We expect the error to look like this,
# " IOError: [Errno 2] No such file or directory: '/proc/20129/stat' ".
# Then, `resource_monitor` could not access the monitored process's
# procfs entry. The entry disappears when the monitored process
# ends by itself or is stopped by the nodemanager. (Anyhow, there is
# no more process to kill.)
# Raise if we see another error number, though!
if e.errno != 2:
raise e
except Exception, exp:
try:
os.kill(repypid, 0)
except OSError, e:
# We expect an `OSError: [Errno 3] No such process`.
# There is no more process to kill, so implicitly `pass`.
if e.errno != 3:
raise e
else:
kill_repy = True
error_msg = repr(exp) + " Monitor death! Impolitely killing repy process!"
monitor_exit_code = 98
finally:
if (error_msg):
print >> sys.stderr, error_msg
# Repy/Monitor processes both _exit, so the thread should be stopped anyway
# nmstatusinterface.stop()
if (kill_repy):
harshexit.portablekill(repypid)
# XXX LP: Is this actually doing something???
try:
statusstorage.write_status("Terminated")
except:
pass
# The monitor process (child) should always exit this way on android
# because we don't want the child to return back to Java if repy (parent)
# is not alive anymore, which it should not be at this point
harshexit.harshexit(monitor_exit_code)
def resource_monitor(repypid, pipe_handle):
"""
<Purpose>
Function runs in a loop forever, checking resource usage and throttling CPU.
Checks CPU, memory, and disk.
<Arguments>
repypid:
The pid of repy
pipe_handle:
A handle to the pipe to the repy process. Allows sending resource use information.
"""
# Get our pid
ourpid = os.getpid()
# Calculate how often disk should be checked
disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_LINUX / repy_constants.CPU_POLLING_FREQ_LINUX)
current_interval = -1 # What cycle are we on
# Store time of the last interval
last_time = getruntime()
last_CPU_time = 0
resume_time = 0
# Run forever...
while True:
########### Check CPU ###########
# Get elapsed time
currenttime = getruntime()
elapsedtime1 = currenttime - last_time # Calculate against last run
elapsedtime2 = currenttime - resume_time # Calculate since we last resumed repy
elapsedtime = min(elapsedtime1, elapsedtime2) # Take the minimum interval
last_time = currenttime # Save the current time
# Safety check, prevent ZeroDivisionError
if elapsedtime == 0.0:
continue
# Get the total cpu at this point
totalCPU = os_api.get_process_cpu_time(ourpid) # Our own usage
totalCPU += os_api.get_process_cpu_time(repypid) # Repy's usage
# Calculate percentage of CPU used
percentused = (totalCPU - last_CPU_time) / elapsedtime
# Do not throttle for the first interval, wrap around
# Store the totalCPU for the next cycle
if last_CPU_time == 0:
last_CPU_time = totalCPU
continue
else:
last_CPU_time = totalCPU
# Calculate stop time
stoptime = nanny.calculate_cpu_sleep_interval(nanny.get_resource_limit("cpu"), percentused, elapsedtime)
# If we are supposed to stop repy, then suspend, sleep and resume
if stoptime > 0.0:
# They must be punished by stopping
os.kill(repypid, signal.SIGSTOP)
# Sleep until time to resume
time.sleep(stoptime)
# And now they can start back up!
os.kill(repypid, signal.SIGCONT)
# Save the resume time
resume_time = getruntime()
# Send this information as a tuple containing the time repy was stopped and
# for how long it was stopped
write_message_to_pipe(pipe_handle, "repystopped", (currenttime, stoptime))
########### End Check CPU ###########
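# Worked example (illustrative; assumes calculate_cpu_sleep_interval chooses a stop
# long enough that CPU use averaged over the run+stop window equals the limit):
# with a cpu limit of 0.10, percentused == 0.50 over elapsedtime == 0.1s means 0.05s
# of CPU was burned; stopping repy for ~0.4s spreads that 0.05s over a 0.5s window,
# i.e. 10% on average.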
#
########### Check Memory ###########
# Get how much memory repy is using
memused = os_api.get_process_rss()
# Check if it is using too much memory
if memused > nanny.get_resource_limit("memory"):
raise ResourceException, "Memory use '"+str(memused)+"' over limit '"+str(nanny.get_resource_limit("memory"))+"'."
########### End Check Memory ###########
#
########### Check Disk Usage ###########
# Increment our current cycle
current_interval += 1
# Check if it is time to check the disk usage
if (current_interval % disk_interval) == 0:
# Reset the interval
current_interval = 0
# Calculate disk used
diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
# Raise exception if we are over limit
if diskused > nanny.get_resource_limit("diskused"):
raise ResourceException, "Disk use '"+str(diskused)+"' over limit '"+str(nanny.get_resource_limit("diskused"))+"'."
# Send the disk usage information, raw bytes used
write_message_to_pipe(pipe_handle, "diskused", diskused)
########### End Check Disk ###########
# Sleep before the next iteration
time.sleep(repy_constants.CPU_POLLING_FREQ_LINUX)
########### functions that help me figure out the os type ###########
# Calculates the system granularity
def calculate_granularity():
global granularity
if ostype in ["Windows"]:
# The Granularity of getTickCount is 1 millisecond
granularity = pow(10,-3)
elif ostype == "Linux":
# We don't know if the granularity is correct yet
correct_granularity = False
# How many times have we tested
tests = 0
# Loop while the granularity is incorrect, up to 10 times
while not correct_granularity and tests <= 10:
current_granularity = os_api.get_uptime_granularity()
uptime_pre = os_api.get_system_uptime()
time.sleep(current_granularity / 10)
uptime_post = os_api.get_system_uptime()
diff = uptime_post - uptime_pre
correct_granularity = int(diff / current_granularity) == (diff / current_granularity)
tests += 1
granularity = current_granularity
elif ostype == "Darwin":
granularity = os_api.get_uptime_granularity()
# Call init_ostype!!!
harshexit.init_ostype()
ostype = harshexit.ostype
osrealtype = harshexit.osrealtype
# Import the proper system wide API
if osrealtype == "Linux":
import linux_api as os_api
elif osrealtype == "Darwin":
import darwin_api as os_api
elif osrealtype == "FreeBSD":
import freebsd_api as os_api
elif ostype == "Windows":
# There is no real reason to do this, since windows is imported separately
import windows_api as os_api
elif osrealtype == "Android":
import android_api as os_api
else:
# This is a non-supported OS
raise UnsupportedSystemException, "The current Operating System is not supported! Fatal Error."
# Set granularity
calculate_granularity()
# For Windows, we need to initialize time.clock()
if ostype in ["Windows"]:
time.clock()
# Initialize getruntime for other platforms
else:
# Set the starttime to the initial uptime
starttime = getruntime()
last_uptime = starttime
# Reset elapsed time
elapsedtime = 0
|
import abc
from rsocket.rsocket import RSocket
class LoadBalancerStrategy(metaclass=abc.ABCMeta):
@abc.abstractmethod
def select(self) -> RSocket:
...
@abc.abstractmethod
async def connect(self):
...
@abc.abstractmethod
async def close(self):
...
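# A minimal concrete strategy might look like the sketch below (illustrative only,
# not part of this module; assumes a pre-built list of RSocket instances):
#
#   class RoundRobinStrategy(LoadBalancerStrategy):
#       def __init__(self, sockets):
#           self._sockets = sockets
#           self._index = 0
#
#       def select(self) -> RSocket:
#           socket = self._sockets[self._index % len(self._sockets)]
#           self._index += 1
#           return socket
#
#       async def connect(self):
#           pass  # connections assumed to be established elsewhere
#
#       async def close(self):
#           pass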
|
'''where you can set absolute and relative path used in the package'''
import os
import numpy as np
root = os.path.dirname(os.path.abspath(__file__))
M2015_file = os.path.join(root, '../aux/rhoTable_Mobley2015.csv')
M1999_file = os.path.join(root, '../aux/rhoTable_Mobley1999.csv')
rhosoaa_fine_file = os.path.join(root, '../aux/surface_reflectance_factor_rho_fine_aerosol_rg0.06_sig0.46.csv')
rhosoaa_coarse_file = os.path.join(root, '../aux/surface_reflectance_factor_rho_coarse_aerosol_rg0.60_sig0.60.csv')
iopw_file = os.path.join(root, '../aux/water_coef.txt')
F0_file = os.path.join(root, '../aux/Thuillier_2003_0.3nm.dat')
# set common wavelengths on which spectra are reprojected
# default 320nm to 950 nm each wl_step nm
wl_step = 3
wl_common = np.arange(320,950+wl_step,wl_step)
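# e.g. with wl_step = 3 this yields 211 wavelengths: 320, 323, ..., 950 nm (inclusive)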
# wavelength indexes for iwr plotting
# for close red/IR
# idx_list_for_plot=(165,170,175,180,185,190,195,200,205)
# through visible/IR
idx_list_for_plot=(28, 37, 51, 71, 91, 105, 130, 140, 170) |
import sys
class DAOException(Exception):
def __init__(self, parent, error):
print(sys.exc_info()[0])
print('Parent Error : {}'.format(parent))
print("ERROR {}:".format(error.args[0]))
raise error
|
# Generated by Django 3.1.8 on 2021-04-16 11:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('lockers', '0003_personalqr'),
]
operations = [
migrations.AlterField(
model_name='personalqr',
name='recipient',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='personalqr',
name='uuid',
field=models.CharField(default='', max_length=50, unique=True),
),
migrations.AlterField(
model_name='qr',
name='uuid',
field=models.CharField(default='', max_length=50, unique=True),
),
]
|
# DADataType.py
from docassemble.base.util import DAObject
__all__ = ['DADTBoolean', 'DADTContinue', 'DADTNumber', 'DADTString', 'DADTEmail', 'DADTDate', 'DADTTime', 'DADTDateTime', 'DADTYesNoMaybe', 'DADTFile', 'DADTEnum', 'DADTObjectRef']
class DADataType(DAObject):
def init(self, input_type="default", *pargs, **kwargs):
self.input_type = input_type
super().init(*pargs, **kwargs)
def __str__(self):
return str(self.value)
def __dir__(self):
return dir(self.value)
def __contains__(self, item):
return self.value.__contains__(item)
def __iter__(self):
return self.value.__iter__()
def __len__(self):
return len(self.value)
def __reversed__(self):
return reversed(self.value)
def __getitem__(self, index):
return self.value.__getitem__(index)
def __repr__(self):
return repr(self.value)
def __add__(self, other):
return self.value.__add__(other)
def __sub__(self, other):
return self.value.__sub__(other)
def __mul__(self, other):
return self.value.__mul__(other)
def __floordiv__(self, other):
return self.value.__floordiv__(other)
def __mod__(self, other):
return self.value.__mod__(other)
def __divmod__(self, other):
return self.value.__divmod__(other)
def __pow__(self, other):
return self.value.__pow__(other)
def __lshift__(self, other):
return self.value.__lshift__(other)
def __rshift__(self, other):
return self.value.__rshift__(other)
def __and__(self, other):
return self.value.__and__(other)
def __xor__(self, other):
return self.value.__xor__(other)
def __or__(self, other):
return self.value.__or__(other)
def __div__(self, other):
return self.value.__div__(other)
def __truediv__(self, other):
return self.value.__truediv__(other)
def __radd__(self, other):
return self.value.__radd__(other)
def __rsub__(self, other):
return self.value.__rsub__(other)
def __rmul__(self, other):
return self.value.__rmul__(other)
def __rdiv__(self, other):
return self.value.__rdiv__(other)
def __rtruediv__(self, other):
return self.value.__rtruediv__(other)
def __rfloordiv__(self, other):
return self.value.__rfloordiv__(other)
def __rmod__(self, other):
return self.value.__rmod__(other)
def __rdivmod__(self, other):
return self.value.__rdivmod__(other)
def __rpow__(self, other):
return self.value.__rpow__(other)
def __rlshift__(self, other):
return self.value.__rlshift__(other)
def __rrshift__(self, other):
return self.value.__rrshift__(other)
def __rand__(self, other):
return self.value.__rand__(other)
def __ror__(self, other):
return self.value.__ror__(other)
def __neg__(self):
return self.value.__neg__()
def __pos__(self):
return self.value.__pos__()
def __abs__(self):
return abs(self.value)
def __invert__(self):
return self.value.__invert__()
def __complex__(self):
return complex(self.value)
def __int__(self):
return int(self.value)
def __long__(self):
return int(self.value)  # Python 3 has no separate long type
def __float__(self):
return float(self.value)
def __oct__(self):
return oct(self.value)
def __hex__(self):
return hex(self.value)
def __index__(self):
return self.value.__index__()
def __le__(self, other):
return self.value.__le__(other)
def __ge__(self, other):
return self.value.__ge__(other)
def __gt__(self, other):
return self.value.__gt__(other)
def __lt__(self, other):
return self.value.__lt__(other)
def __eq__(self, other):
return self.value.__eq__(other)
def __ne__(self, other):
return self.value.__ne__(other)
def __hash__(self):
return hash(self.value)
def __bool__(self):
return bool(self.value)
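# Illustration of the delegation above (not part of this module): once .value is set,
# the wrapper behaves like the wrapped value, e.g. (instantiation shown in the usual
# DAObject style; the exact constructor arguments are an assumption here)
#
#   n = DADTNumber('n')
#   n.value = 3
#   n + 4        # -> 7, via DADataType.__add__ delegating to n.value.__add__
#   str(n)       # -> '3'
#   bool(n)      # -> True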
class DADTBoolean(DADataType):
#TODO Add validation for input types.
pass
class DADTContinue(DADataType):
pass
class DADTNumber(DADataType):
pass
class DADTString(DADataType):
pass
class DADTEmail(DADataType):
pass
class DADTDate(DADataType):
pass
class DADTTime(DADataType):
pass
class DADTDateTime(DADataType):
pass
class DADTYesNoMaybe(DADataType):
pass
class DADTFile(DADataType):
pass
class DADTEnum(DADataType):
pass
class DADTObjectRef(DADataType):
pass |
# file: SubmissionName.py
import re
import Loghelper
'''
Represents the "meta data" of a submission file or directory name
'''
class SubmissionName:
def __init__(self, submissionName):
# subnamePattern1 = "(?P<task>\w+)_Level(?P<level>\w)_(?P<student>[_\w]+)\.zip"
# Der "Trick des Jahres" - non greedy dank *? anstelle von +, damit der Vorname nicht dem Aufgabenname zugeordnet wird
# subnamePattern = "(?P<task>\w*?)_(?P<student>[_\w]+)\.zip"
# subnamePattern = "(?P<exercise>\w*?)_(?P<first>[\w]+)_(?P<last>[\w]+)\.zip"
subnamePattern1 = "(?P<exercise>\w*?)_(?P<first>[\w]+)_(?P<last>[\w]+)"
subnamePattern2 = "(?P<exercise>\w*?)_(?P<name>[\w]+)"
# nameElements = list(re.finditer(subnamePattern, submissionFile))
# findall only returns a list - use finditer to get the named capture groups
# eg. list(re.finditer(pa, fiName))[0].group("exercise")
# does the exercise_firstname_lastname pattern match?
if len(list(re.finditer(subnamePattern1, submissionName))) > 0:
matchList = list(re.finditer(subnamePattern1, submissionName))[0].groups()
# did all capture groups match?
if len(matchList) == 3:
exercise = matchList[0]
first = matchList[1]
last = matchList[2]
self.exercise = exercise
self.student = f"{first}_{last}"
# does the exercise_name pattern fit?
elif len(list(re.finditer(subnamePattern2, submissionName))) > 0:
matchList = list(re.finditer(subnamePattern2, submissionName))[0].groups()
# did all capture groups match?
if len(matchList) == 2:
exercise = matchList[0]
lastname = matchList[1]
self.exercise = exercise
self.student = f"{lastname}"
else:
# should not happen but...
self.exercise = ""
self.student = ""
infoMessage = f"SubmissionName: {submissionName} does not match the name patterns"
Loghelper.logError(infoMessage)
def __repr__(self):
return f"Student: {self.student} Exercise: {self.exercise}"
|
import os
from .message import Message
from ..limits import get_limits
# Receives a simple newline-terminated message
class LineMessage(Message):
def __init__(self):
Message.__init__(self)
def dataReceived(self, data):
self._buffer += data
if len(self._buffer) > get_limits().max_line_message_length:
self.messageError(self._buffer)
self._buffer = b''
if self._buffer.endswith(str.encode(os.linesep)):
self.messageReceived(self._buffer.rstrip())
self._buffer = b''
def connectionLost(self, reason=None):
self._buffer = b''
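# Illustrative behaviour (not part of this module): feeding
# b"PING" + str.encode(os.linesep) to dataReceived triggers messageReceived(b"PING"),
# while a buffer that grows beyond get_limits().max_line_message_length is passed to
# messageError and discarded instead.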
|
#
# This class was auto-generated from the API references found at
# https://support.direct.ingenico.com/documentation/api/reference/
#
from ingenico.direct.sdk.api_resource import ApiResource
from ingenico.direct.sdk.response_exception import ResponseException
from ingenico.direct.sdk.domain.error_response import ErrorResponse
from ingenico.direct.sdk.domain.get_payment_products_response import GetPaymentProductsResponse
from ingenico.direct.sdk.domain.payment_product import PaymentProduct
from ingenico.direct.sdk.domain.payment_product_networks_response import PaymentProductNetworksResponse
from ingenico.direct.sdk.domain.product_directory import ProductDirectory
from ingenico.direct.sdk.merchant.products.i_products_client import IProductsClient
class ProductsClient(ApiResource, IProductsClient):
"""
Products client. Thread-safe.
"""
def __init__(self, parent, path_context):
"""
:param parent: :class:`ingenico.direct.sdk.api_resource.ApiResource`
:param path_context: dict[str, str]
"""
super(ProductsClient, self).__init__(parent, path_context)
def get_payment_products(self, query, context=None):
"""
Resource /v2/{merchantId}/products - Get payment products
See also https://support.direct.ingenico.com/documentation/api/reference#operation/GetPaymentProducts
:param query: :class:`ingenico.direct.sdk.merchant.products.get_payment_products_params.GetPaymentProductsParams`
:param context: :class:`ingenico.direct.sdk.call_context.CallContext`
:return: :class:`ingenico.direct.sdk.domain.get_payment_products_response.GetPaymentProductsResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: DirectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporarily unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
uri = self._instantiate_uri("/v2/{merchantId}/products", None)
try:
return self._communicator.get(
uri,
self._client_headers,
query,
GetPaymentProductsResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def get_payment_product(self, payment_product_id, query, context=None):
"""
Resource /v2/{merchantId}/products/{paymentProductId} - Get payment product
See also https://support.direct.ingenico.com/documentation/api/reference#operation/GetPaymentProduct
:param payment_product_id: int
:param query: :class:`ingenico.direct.sdk.merchant.products.get_payment_product_params.GetPaymentProductParams`
:param context: :class:`ingenico.direct.sdk.call_context.CallContext`
:return: :class:`ingenico.direct.sdk.domain.payment_product.PaymentProduct`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: DirectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporarily unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentProductId": str(payment_product_id),
}
uri = self._instantiate_uri("/v2/{merchantId}/products/{paymentProductId}", path_context)
try:
return self._communicator.get(
uri,
self._client_headers,
query,
PaymentProduct,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def get_product_directory(self, payment_product_id, query, context=None):
"""
Resource /v2/{merchantId}/products/{paymentProductId}/directory - Get payment product directory
See also https://support.direct.ingenico.com/documentation/api/reference#operation/GetProductDirectoryApi
:param payment_product_id: int
:param query: :class:`ingenico.direct.sdk.merchant.products.get_product_directory_params.GetProductDirectoryParams`
:param context: :class:`ingenico.direct.sdk.call_context.CallContext`
:return: :class:`ingenico.direct.sdk.domain.product_directory.ProductDirectory`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: DirectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporarily unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentProductId": str(payment_product_id),
}
uri = self._instantiate_uri("/v2/{merchantId}/products/{paymentProductId}/directory", path_context)
try:
return self._communicator.get(
uri,
self._client_headers,
query,
ProductDirectory,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
def get_payment_product_networks(self, payment_product_id, query, context=None):
"""
Resource /v2/{merchantId}/products/{paymentProductId}/networks - Get payment product networks
See also https://support.direct.ingenico.com/documentation/api/reference#operation/GetPaymentProductNetworks
:param payment_product_id: int
:param query: :class:`ingenico.direct.sdk.merchant.products.get_payment_product_networks_params.GetPaymentProductNetworksParams`
:param context: :class:`ingenico.direct.sdk.call_context.CallContext`
:return: :class:`ingenico.direct.sdk.domain.payment_product_networks_response.PaymentProductNetworksResponse`
:raise: ValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
:raise: AuthorizationException if the request was not allowed (HTTP status code 403)
:raise: ReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
or there was a conflict (HTTP status code 404, 409 or 410)
:raise: DirectException if something went wrong at the Ingenico ePayments platform,
the Ingenico ePayments platform was unable to process a message from a downstream partner/acquirer,
or the service that you're trying to reach is temporarily unavailable (HTTP status code 500, 502 or 503)
:raise: ApiException if the Ingenico ePayments platform returned any other error
"""
path_context = {
"paymentProductId": str(payment_product_id),
}
uri = self._instantiate_uri("/v2/{merchantId}/products/{paymentProductId}/networks", path_context)
try:
return self._communicator.get(
uri,
self._client_headers,
query,
PaymentProductNetworksResponse,
context)
except ResponseException as e:
error_type = ErrorResponse
error_object = self._communicator.marshaller.unmarshal(e.body, error_type)
raise self._create_exception(e.status_code, e.body, error_object, context)
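# Hedged usage sketch (not part of this module; the factory/accessor names below are
# assumptions and may differ in this SDK): a ProductsClient is normally reached through
# a merchant client rather than constructed directly, roughly
#
#   client = Factory.create_client_from_file('config.ini', 'apiKeyId', 'secret')
#   products_client = client.merchant('merchantId').products()
#   response = products_client.get_payment_products(GetPaymentProductsParams())
#
# The query parameter classes and the exceptions each call can raise are listed in the
# docstrings above.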
|
"""
Fibra is a sophisticated scheduler for cooperative tasks.
It's a bit like Stackless Python. It uses Python generator functions
to create tasks which can be iterated.
"""
from schedule import *
from handlers.sleep import Sleep
from handlers.nonblock import Unblock
from handlers.tasks import Async, Return, Finished, Suspend, Self
from handlers.tube import Tube, EmptyTube, ClosedTube
from handlers.io import Read, Write
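# Hedged usage sketch (scheduler API names are assumptions, not confirmed by this file):
# tasks are generator functions that yield handler objects such as Sleep, roughly
#
#   def ticker():
#       yield Sleep(1)      # suspend this task for one second
#       yield Return(42)    # finish and hand a value back to a waiting task
#
#   s = schedule()          # assumed to be exported by the wildcard import above
#   s.install(ticker())
#   s.run()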
|
from dash import html
from dash import dcc
import feffery_antd_components as fac
import feffery_utils_components as fuc
import callbacks.AntdDateRangePicker
docs_content = html.Div(
[
html.Div(
[
html.H2(
'AntdDateRangePicker(id, className, style, *args, **kwargs)',
style={
'borderLeft': '4px solid grey',
'padding': '3px 0 3px 10px',
'backgroundColor': '#f5f5f5'
}
),
fac.AntdBackTop(
containerId='docs-content',
duration=0.6
),
html.Span(
'主要参数说明:',
id='主要参数说明',
style={
'borderLeft': '4px solid grey',
'padding': '3px 0 3px 10px',
'backgroundColor': '#f5f5f5',
'fontWeight': 'bold',
'fontSize': '1.2rem'
}
),
fuc.FefferyMarkdown(
markdownStr=open('documents/AntdDateRangePicker.md', encoding='utf-8').read()
),
html.Div(
html.Span(
'使用示例',
id='使用示例',
style={
'borderLeft': '4px solid grey',
'padding': '3px 0 3px 10px',
'backgroundColor': '#f5f5f5',
'fontWeight': 'bold',
'fontSize': '1.2rem'
}
),
style={
'marginBottom': '10px'
}
),
html.Div(
[
fac.AntdDateRangePicker(),
fac.AntdDivider(
'基础的日期范围选择控件',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''fac.AntdDateRangePicker()'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='基础的日期范围选择控件',
className='div-highlight'
),
html.Div(
[
fac.AntdDateRangePicker(showTime=True),
fac.AntdDivider(
'添加时间选择',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''fac.AntdDateRangePicker(showTime=True)'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='添加时间选择',
className='div-highlight'
),
html.Div(
[
fac.AntdDateRangePicker(placeholderStart='日期起点', placeholderEnd='日期终点'),
fac.AntdDivider(
'自定义空白填充文字',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''fac.AntdDateRangePicker(placeholderStart='日期起点', placeholderEnd='日期终点')'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='自定义空白填充文字',
className='div-highlight'
),
html.Div(
[
fac.AntdDateRangePicker(picker='week'),
fac.AntdDateRangePicker(picker='month'),
fac.AntdDateRangePicker(picker='quarter', placeholderStart='开始季度', placeholderEnd='结束季度'),
fac.AntdDateRangePicker(picker='year'),
fac.AntdDivider(
'修改时间粒度',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''
fac.AntdDateRangePicker(picker='week'),
fac.AntdDateRangePicker(picker='month'),
fac.AntdDateRangePicker(picker='quarter', placeholderStart='开始季度', placeholderEnd='结束季度'),
fac.AntdDateRangePicker(picker='year')'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='修改时间粒度',
className='div-highlight'
),
html.Div(
[
fac.AntdDateRangePicker(disabledStart=True),
fac.AntdDateRangePicker(disabledEnd=True),
fac.AntdDivider(
'禁用开始或结束输入框',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''
fac.AntdDateRangePicker(disabledStart=True),
fac.AntdDateRangePicker(disabledEnd=True)'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='禁用开始或结束输入框',
className='div-highlight'
),
html.Div(
[
fac.AntdDateRangePicker(id='date-range-picker-demo'),
html.Br(),
fac.AntdSpin(
html.Em(id='date-range-picker-demo-output'),
text='回调中'
),
fac.AntdDivider(
'回调示例',
lineColor='#f0f0f0',
innerTextOrientation='left'
),
fac.AntdCollapse(
fuc.FefferySyntaxHighlighter(
showLineNumbers=True,
showInlineLineNumbers=True,
language='python',
codeStyle='coy-without-shadows',
codeString='''
fac.AntdDateRangePicker(id='date-range-picker-demo'),
html.Br(),
html.Em(id='date-range-picker-demo-output')
...
@app.callback(
Output('date-range-picker-demo-output', 'children'),
[Input('date-range-picker-demo', 'selectedStartDate'),
Input('date-range-picker-demo', 'selectedEndDate')],
prevent_initial_call=True
)
def date_picker_callback_demo(selectedStartDate, selectedEndDate):
if selectedStartDate and selectedEndDate:
return f'{selectedStartDate} ~ {selectedEndDate}'
return dash.no_update'''
),
title='点击查看代码',
is_open=False,
ghost=True
)
],
style={
'marginBottom': '40px',
'padding': '10px 10px 20px 10px',
'border': '1px solid #f0f0f0'
},
id='回调示例',
className='div-highlight'
),
html.Div(style={'height': '100px'})
],
style={
'flex': 'auto'
}
),
html.Div(
fac.AntdAnchor(
linkDict=[
{'title': '主要参数说明', 'href': '#主要参数说明'},
{
'title': '使用示例',
'href': '#使用示例',
'children': [
{'title': '基础的日期范围选择控件', 'href': '#基础的日期范围选择控件'},
{'title': '添加时间选择', 'href': '#添加时间选择'},
{'title': '自定义空白填充文字', 'href': '#自定义空白填充文字'},
{'title': '修改时间粒度', 'href': '#修改时间粒度'},
{'title': '禁用开始或结束输入框', 'href': '#禁用开始或结束输入框'},
{'title': '回调示例', 'href': '#回调示例'},
]
},
],
containerId='docs-content',
targetOffset=200
),
style={
'flex': 'none',
'margin': '20px'
}
)
],
style={
'display': 'flex'
}
)
|
from trinity.exceptions import (
BaseTrinityError,
)
class EventBusNotReady(BaseTrinityError):
"""
Raised when a plugin tried to access an :class:`~lahja.eventbus.EventBus` before the plugin
had received its :meth:`~trinity.extensibility.plugin.BasePlugin.on_ready` call.
"""
pass
class InvalidPluginStatus(BaseTrinityError):
"""
Raised when an action was attempted while the current
:class:`~trinity.extensibility.plugin.PluginStatus` does not allow that action.
"""
pass
class UnsuitableShutdownError(BaseTrinityError):
"""
Raised when :meth:`~trinity.extensibility.plugin_manager.PluginManager.shutdown` was called on
a :class:`~trinity.extensibility.plugin_manager.PluginManager` instance that operates in the
:class:`~trinity.extensibility.plugin_manager.MainAndIsolatedProcessScope` or when
:meth:`~trinity.extensibility.plugin.PluginManager.shutdown_blocking` was called on a
:class:`~trinity.extensibility.plugin_manager.PluginManager` instance that operates in the
:class:`~trinity.extensibility.plugin_manager.SharedProcessScope`.
"""
pass
|
def funcaoQueFazAlgo(a, b):
print('Algo!!')
def outra_funcao(arg1, arg2):
print("Outra funcao!", arg1+arg2)
funcaoQueFazAlgo(1, 2)
outra_funcao(3, 4)
|
"""Defines Python-version calculation "representation" objects"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import sys
import time as _time
import math as _math
import numpy as _np
import scipy.sparse as _sps
import itertools as _itertools
import functools as _functools
from ...tools import mpitools as _mpit
from ...tools import slicetools as _slct
from ...tools import matrixtools as _mt
from ...tools import listtools as _lt
from ...tools import optools as _gt
from ...tools.matrixtools import _fas
from scipy.sparse.linalg import LinearOperator
LARGE = 1000000000
# a large number used as a term weight so high that it won't help (at all) a
# path get included in the selected set of paths.
SMALL = 1e-5
# a number which is used in place of zero within the
# product of term magnitudes to keep a running path
# magnitude from being zero (and losing memory of terms).
# DEBUG!!!
DEBUG_FCOUNT = 0
class DMStateRep(object):
def __init__(self, data, reducefix=0):
assert(data.dtype == _np.dtype('d'))
if reducefix == 0:
self.base = data
else:
# because serialization of numpy array flags is borked (around Numpy v1.16), we need to copy data
# (so self.base *owns* its data) and manually convey the writeable flag.
self.base = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
self.base.flags.writeable = True if reducefix == 1 else False
def __reduce__(self):
reducefix = 1 if self.base.flags.writeable else 2
return (DMStateRep, (self.base, reducefix))
def copy_from(self, other):
self.base = other.base.copy()
def todense(self):
return self.base
@property
def dim(self):
return len(self.base)
def __str__(self):
return str(self.base)
class DMEffectRep(object):
def __init__(self, dim):
self.dim = dim
def probability(self, state):
raise NotImplementedError()
class DMEffectRep_Dense(DMEffectRep):
def __init__(self, data):
assert(data.dtype == _np.dtype('d'))
self.base = data
super(DMEffectRep_Dense, self).__init__(len(self.base))
def __reduce__(self):
return (DMEffectRep_Dense, (self.base,))
def probability(self, state):
# can assume state is a DMStateRep
return _np.dot(self.base, state.base) # not vdot b/c *real* data
class DMEffectRep_TensorProd(DMEffectRep):
def __init__(self, kron_array, factor_dims, nfactors, max_factor_dim, dim):
# int dim = _np.product(factor_dims) -- just send as argument for speed?
assert(dim == _np.product(factor_dims))
self.kron_array = kron_array
self.factor_dims = factor_dims
self.nfactors = nfactors
self.max_factor_dim = max_factor_dim # Unused
super(DMEffectRep_TensorProd, self).__init__(dim)
def __reduce__(self):
return (DMEffectRep_TensorProd,
(self.kron_array, self.factor_dims, self.nfactors, self.max_factor_dim, self.dim))
def todense(self, outvec):
N = self.dim
#Put last factor at end of outvec
k = self.nfactors - 1 # last factor
off = N - self.factor_dims[k] # offset into outvec
for i in range(self.factor_dims[k]):
outvec[off + i] = self.kron_array[k, i]
sz = self.factor_dims[k]
# Repeatedly scale & copy the last "sz" elements of outvec forward
# (as many times as there are elements in the current factor array)
# - but multiply *in-place* the last "sz" elements.
for k in range(self.nfactors - 2, -1, -1): # for all but the last factor
off = N - sz * self.factor_dims[k]
endoff = N - sz
#For all but the final element of self.kron_array[k,:],
# multiply & copy the final sz elements of outvec into position
for j in range(self.factor_dims[k] - 1):
mult = self.kron_array[k, j]
for i in range(sz):
outvec[off + i] = mult * outvec[endoff + i]
off += sz
#Last element: in-place mult
#assert(off == endoff)
mult = self.kron_array[k, self.factor_dims[k] - 1]
for i in range(sz):
outvec[endoff + i] *= mult
sz *= self.factor_dims[k]
return outvec
def probability(self, state): # allow scratch to be passed in?
scratch = _np.empty(self.dim, 'd')
Edense = self.todense(scratch)
return _np.dot(Edense, state.base) # not vdot b/c data is *real*
class DMEffectRep_Computational(DMEffectRep):
def __init__(self, zvals, dim):
# int dim = 4**len(zvals) -- just send as argument for speed?
assert(dim == 4**len(zvals))
assert(len(zvals) <= 64), "Cannot create a Computational basis rep with >64 qubits!"
# Current storage of computational basis states converts zvals -> 64-bit integer
base = 1
self.zvals_int = 0
for v in zvals:
assert(v in (0, 1)), "zvals must contain only 0s and 1s"
self.zvals_int += base * v
base *= 2 # or left shift?
self.zvals = zvals
self.nfactors = len(zvals) # (or nQubits)
self.abs_elval = 1 / (_np.sqrt(2)**self.nfactors)
super(DMEffectRep_Computational, self).__init__(dim)
def __reduce__(self):
return (DMEffectRep_Computational, (self.zvals, self.dim))
def parity(self, x):
"""recursively divide the (64-bit) integer into two equal
halves and take their XOR until only 1 bit is left """
x = (x & 0x00000000FFFFFFFF) ^ (x >> 32)
x = (x & 0x000000000000FFFF) ^ (x >> 16)
x = (x & 0x00000000000000FF) ^ (x >> 8)
x = (x & 0x000000000000000F) ^ (x >> 4)
x = (x & 0x0000000000000003) ^ (x >> 2)
x = (x & 0x0000000000000001) ^ (x >> 1)
return x & 1 # return the last bit (0 or 1)
def todense(self, outvec, trust_outvec_sparsity=False):
# when trust_outvec_sparsity is True, assume we only need to fill in the
# non-zero elements of outvec (i.e. that outvec is already zero wherever
# this vector is zero).
if not trust_outvec_sparsity:
outvec[:] = 0 # reset everything to zero
N = self.nfactors
# there are nQubits factors
# each factor (4-element, 1Q dmvec) has 2 zero elements and 2 nonzero ones
# loop is over all non-zero elements of the final outvec by looping over
# all the sets of *entirely* nonzero elements from the factors.
# Let the two possible nonzero elements of the k-th factor be represented
# by the k-th bit of `finds` below, which ranges from 0 to 2^nFactors-1
for finds in range(2**N):
#Create the final index (within outvec) corresponding to finds
# assume, like tensorprod, that factor ordering == kron ordering
# so outvec = kron( factor[0], factor[1], ... factor[N-1] ).
# Let factorDim[k] == 4**(N-1-k) be the stride associated with the k-th index
# Whenever finds[bit k] == 0 => finalIndx += 0*factorDim[k]
# finds[bit k] == 1 => finalIndx += 3*factorDim[k] (3 b/c factor's 2nd nonzero el is at index 3)
finalIndx = sum([3 * (4**(N - 1 - k)) for k in range(N) if bool(finds & (1 << k))])
#Determine the sign of this element (the element is either +/- (1/sqrt(2))^N )
# A minus sign is picked up whenever finds[bit k] == 1 (which means we're looking
# at the index=3 element of the factor vec) AND self.zvals_int[bit k] == 1
# (which means it's a [1 0 0 -1] state rather than a [1 0 0 1] state).
# Since we only care whether the number of minus signs is even or odd, we can
# BITWISE-AND finds with self.zvals_int (giving an integer whose binary-expansion's
# number of 1's == the number of minus signs) and compute the parity of this.
minus_sign = self.parity(finds & self.zvals_int)
outvec[finalIndx] = -self.abs_elval if minus_sign else self.abs_elval
return outvec
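# Worked example (follows directly from the code above): for zvals == [1] and dim == 4,
# abs_elval == 1/sqrt(2) and zvals_int == 1, so todense fills index 0 with +1/sqrt(2)
# (finds == 0) and index 3 with -1/sqrt(2) (finds == 1, odd parity), i.e. the Pauli-basis
# vector of the |1><1| effect: [1/sqrt(2), 0, 0, -1/sqrt(2)].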
def probability(self, state):
scratch = _np.empty(self.dim, 'd')
Edense = self.todense(scratch)
return _np.dot(Edense, state.base) # not vdot b/c data is *real*
class DMEffectRep_Errgen(DMEffectRep): # TODO!! Need to make SV version
def __init__(self, errgen_oprep, effect_rep, errgen_id):
dim = effect_rep.dim
self.errgen_rep = errgen_oprep
self.effect_rep = effect_rep
self.errgen_id = errgen_id
super(DMEffectRep_Errgen, self).__init__(dim)
def __reduce__(self):
return (DMEffectRep_Errgen, (self.errgen_rep, self.effect_rep, self.errgen_id))
def probability(self, state):
state = self.errgen_rep.acton(state) # *not* acton_adjoint
return self.effect_rep.probability(state)
class DMOpRep(object):
def __init__(self, dim):
self.dim = dim
def acton(self, state):
raise NotImplementedError()
def adjoint_acton(self, state):
raise NotImplementedError()
def aslinearoperator(self):
def mv(v):
if v.ndim == 2 and v.shape[1] == 1: v = v[:, 0]
in_state = DMStateRep(_np.ascontiguousarray(v, 'd'))
return self.acton(in_state).todense()
def rmv(v):
if v.ndim == 2 and v.shape[1] == 1: v = v[:, 0]
in_state = DMStateRep(_np.ascontiguousarray(v, 'd'))
return self.adjoint_acton(in_state).todense()
return LinearOperator((self.dim, self.dim), matvec=mv, rmatvec=rmv) # transpose, adjoint, dot, matmat?
class DMOpRep_Dense(DMOpRep):
def __init__(self, data, reducefix=0):
if reducefix == 0:
self.base = data
else:
# because serialization of numpy array flags is borked (around Numpy v1.16), we need to copy data
# (so self.base *owns* its data) and manually convey the writeable flag.
self.base = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
self.base.flags.writeable = True if reducefix == 1 else False
super(DMOpRep_Dense, self).__init__(self.base.shape[0])
def __reduce__(self):
reducefix = 1 if self.base.flags.writeable else 2
return (DMOpRep_Dense, (self.base, reducefix))
def acton(self, state):
return DMStateRep(_np.dot(self.base, state.base))
def adjoint_acton(self, state):
return DMStateRep(_np.dot(self.base.T, state.base)) # no conjugate b/c *real* data
def __str__(self):
return "DMOpRep_Dense:\n" + str(self.base)
class DMOpRep_Embedded(DMOpRep):
def __init__(self, embedded_op, numBasisEls, actionInds,
blocksizes, embedded_dim, nComponentsInActiveBlock,
iActiveBlock, nBlocks, dim):
self.embedded = embedded_op
self.numBasisEls = numBasisEls
self.actionInds = actionInds
self.blocksizes = blocksizes
numBasisEls_noop_blankaction = numBasisEls.copy()
for i in actionInds: numBasisEls_noop_blankaction[i] = 1
self.basisInds_noop_blankaction = [list(range(n)) for n in numBasisEls_noop_blankaction]
# multipliers to go from per-label indices to tensor-product-block index
# e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
self.multipliers = _np.array(_np.flipud(_np.cumprod([1] + list(
reversed(list(numBasisEls[1:]))))), _np.int64)
self.basisInds_action = [list(range(numBasisEls[i])) for i in actionInds]
self.embeddedDim = embedded_dim
self.nComponents = nComponentsInActiveBlock
self.iActiveBlock = iActiveBlock
self.nBlocks = nBlocks
self.offset = sum(blocksizes[0:iActiveBlock])
super(DMOpRep_Embedded, self).__init__(dim)
def __reduce__(self):
return (DMOpRep_Embedded, (self.embedded,
self.numBasisEls, self.actionInds,
self.blocksizes, self.embeddedDim,
self.nComponents, self.iActiveBlock,
self.nBlocks, self.dim))
def _acton_other_blocks_trivially(self, output_state, state):
offset = 0
for iBlk, blockSize in enumerate(self.blocksizes):
if iBlk != self.iActiveBlock:
output_state.base[offset:offset + blockSize] = state.base[offset:offset + blockSize] # identity op
offset += blockSize
def acton(self, state):
output_state = DMStateRep(_np.zeros(state.base.shape, 'd'))
offset = self.offset # if relToBlock else self.offset (relToBlock == False here)
#print("DB REPLIB ACTON: ",self.basisInds_noop_blankaction)
#print("DB REPLIB ACTON: ",self.basisInds_action)
#print("DB REPLIB ACTON: ",self.multipliers)
for b in _itertools.product(*self.basisInds_noop_blankaction): # zeros in all action-index locations
vec_index_noop = _np.dot(self.multipliers, tuple(b))
inds = []
for op_b in _itertools.product(*self.basisInds_action):
vec_index = vec_index_noop
for i, bInd in zip(self.actionInds, op_b):
#b[i] = bInd #don't need to do this; just update vec_index:
vec_index += self.multipliers[i] * bInd
inds.append(offset + vec_index)
embedded_instate = DMStateRep(state.base[inds])
embedded_outstate = self.embedded.acton(embedded_instate)
output_state.base[inds] += embedded_outstate.base
#act on other blocks trivially:
self._acton_other_blocks_trivially(output_state, state)
return output_state
def adjoint_acton(self, state):
""" Act the adjoint of this gate map on an input state """
#NOTE: Same as acton except uses 'adjoint_acton(...)' below
output_state = DMStateRep(_np.zeros(state.base.shape, 'd'))
offset = self.offset # if relToBlock else self.offset (relToBlock == False here)
for b in _itertools.product(*self.basisInds_noop_blankaction): # zeros in all action-index locations
vec_index_noop = _np.dot(self.multipliers, tuple(b))
inds = []
for op_b in _itertools.product(*self.basisInds_action):
vec_index = vec_index_noop
for i, bInd in zip(self.actionInds, op_b):
#b[i] = bInd #don't need to do this; just update vec_index:
vec_index += self.multipliers[i] * bInd
inds.append(offset + vec_index)
embedded_instate = DMStateRep(state.base[inds])
embedded_outstate = self.embedded.adjoint_acton(embedded_instate)
output_state.base[inds] += embedded_outstate.base
#act on other blocks trivially:
self._acton_other_blocks_trivially(output_state, state)
return output_state
class DMOpRep_Composed(DMOpRep):
def __init__(self, factor_op_reps, dim):
#assert(len(factor_op_reps) > 0), "Composed gates must contain at least one factor gate!"
self.factor_reps = factor_op_reps
super(DMOpRep_Composed, self).__init__(dim)
def __reduce__(self):
return (DMOpRep_Composed, (self.factor_reps, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
for gate in self.factor_reps:
state = gate.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for gate in reversed(self.factor_reps):
state = gate.adjoint_acton(state)
return state
def reinit_factor_op_reps(self, new_factor_op_reps):
self.factor_reps = new_factor_op_reps
class DMOpRep_Sum(DMOpRep):
def __init__(self, factor_reps, dim):
#assert(len(factor_reps) > 0), "Summed gates must contain at least one factor gate!"
self.factor_reps = factor_reps
super(DMOpRep_Sum, self).__init__(dim)
def __reduce__(self):
return (DMOpRep_Sum, (self.factor_reps, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
output_state = DMStateRep(_np.zeros(state.base.shape, 'd'))
for f in self.factor_reps:
output_state.base += f.acton(state).base
return output_state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
output_state = DMStateRep(_np.zeros(state.base.shape, 'd'))
for f in self.factor_reps:
output_state.base += f.adjoint_acton(state).base
return output_state
class DMOpRep_Exponentiated(DMOpRep):
def __init__(self, exponentiated_op_rep, power, dim):
self.exponentiated_op = exponentiated_op_rep
self.power = power
super(DMOpRep_Exponentiated, self).__init__(dim)
def __reduce__(self):
return (DMOpRep_Exponentiated, (self.exponentiated_op, self.power, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
for i in range(self.power):
state = self.exponentiated_op.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for i in range(self.power):
state = self.exponentiated_op.adjoint_acton(state)
return state
class DMOpRep_Lindblad(DMOpRep):
def __init__(self, errgen_rep,
mu, eta, m_star, s, unitarypost_data,
unitarypost_indices, unitarypost_indptr):
dim = errgen_rep.dim
self.errgen_rep = errgen_rep
if len(unitarypost_data) > 0: # (nnz > 0)
self.unitary_postfactor = _sps.csr_matrix(
(unitarypost_data, unitarypost_indices,
unitarypost_indptr), shape=(dim, dim))
else:
self.unitary_postfactor = None # no unitary postfactor
self.mu = mu
self.eta = eta
self.m_star = m_star
self.s = s
super(DMOpRep_Lindblad, self).__init__(dim)
def set_exp_params(self, mu, eta, m_star, s):
self.mu = mu
self.eta = eta
self.m_star = m_star
self.s = s
def get_exp_params(self):
return (self.mu, self.eta, self.m_star, self.s)
def __reduce__(self):
if self.unitary_postfactor is None:
return (DMOpRep_Lindblad, (self.errgen_rep, self.mu, self.eta, self.m_star, self.s,
_np.empty(0, 'd'), _np.empty(0, _np.int64), _np.zeros(1, _np.int64)))
else:
return (DMOpRep_Lindblad, (self.errgen_rep, self.mu, self.eta, self.m_star, self.s,
self.unitary_postfactor.data, self.unitary_postfactor.indices,
self.unitary_postfactor.indptr))
def acton(self, state):
""" Act this gate map on an input state """
if self.unitary_postfactor is not None:
statedata = self.unitary_postfactor.dot(state.base)
else:
statedata = state.base
tol = 1e-16 # 2^-53 (=Scipy default) -- TODO: make into an arg?
A = self.errgen_rep.aslinearoperator() # ~= a sparse matrix for call below
statedata = _mt._custom_expm_multiply_simple_core(
A, statedata, self.mu, self.m_star, self.s, tol, self.eta)
return DMStateRep(statedata)
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
raise NotImplementedError("No adjoint action implemented for sparse Lindblad LinearOperator Reps yet.")
class DMOpRep_Sparse(DMOpRep):
def __init__(self, A_data, A_indices, A_indptr):
dim = len(A_indptr) - 1
self.A = _sps.csr_matrix((A_data, A_indices, A_indptr), shape=(dim, dim))
super(DMOpRep_Sparse, self).__init__(dim)
def __reduce__(self):
return (DMOpRep_Sparse, (self.A.data, self.A.indices, self.A.indptr))
@property
def data(self):
return self.A.data
@property
def indices(self):
return self.A.indices
@property
def indptr(self):
return self.A.indptr
def acton(self, state):
""" Act this gate map on an input state """
return DMStateRep(self.A.dot(state.base))
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
Aadj = self.A.conjugate(copy=True).transpose()
return DMStateRep(Aadj.dot(state.base))
# State vector (SV) propagation wrapper classes
class SVStateRep(object):
def __init__(self, data, reducefix=0):
assert(data.dtype == _np.dtype(complex))
if reducefix == 0:
self.base = data
else:
# because serialization of numpy array flags is borked (around Numpy v1.16), we need to copy data
# (so self.base *owns* its data) and manually convey the writeable flag.
self.base = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
self.base.flags.writeable = True if reducefix == 1 else False
def __reduce__(self):
reducefix = 1 if self.base.flags.writeable else 2
return (SVStateRep, (self.base, reducefix))
def copy_from(self, other):
self.base = other.base.copy()
@property
def dim(self):
return len(self.base)
def todense(self):
return self.base
def __str__(self):
return str(self.base)
class SVEffectRep(object):
def __init__(self, dim):
self.dim = dim
def probability(self, state):
return abs(self.amplitude(state))**2
def amplitude(self, state):
raise NotImplementedError()
class SVEffectRep_Dense(SVEffectRep):
def __init__(self, data, reducefix=0):
assert(data.dtype == _np.dtype(complex))
if reducefix == 0:
self.base = data
else:
# because serialization of numpy array flags is borked (around Numpy v1.16), we need to copy data
# (so self.base *owns* its data) and manually convey the writeable flag.
self.base = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
self.base.flags.writeable = True if reducefix == 1 else False
super(SVEffectRep_Dense, self).__init__(len(self.base))
def __reduce__(self):
reducefix = 1 if self.base.flags.writeable else 2
return (SVEffectRep_Dense, (self.base, reducefix))
def amplitude(self, state):
# can assume state is a SVStateRep
return _np.vdot(self.base, state.base) # (or just 'dot')
class SVEffectRep_TensorProd(SVEffectRep):
def __init__(self, kron_array, factor_dims, nfactors, max_factor_dim, dim):
# int dim = _np.product(factor_dims) -- just send as argument for speed?
assert(dim == _np.product(factor_dims))
self.kron_array = kron_array
self.factor_dims = factor_dims
self.nfactors = nfactors
self.max_factor_dim = max_factor_dim # Unused
super(SVEffectRep_TensorProd, self).__init__(dim)
def __reduce__(self):
return (SVEffectRep_TensorProd, (self.kron_array, self.factor_dims,
self.nfactors, self.max_factor_dim, self.dim))
def todense(self, outvec):
N = self.dim
#Put last factor at end of outvec
k = self.nfactors - 1 # last factor
off = N - self.factor_dims[k] # offset into outvec
for i in range(self.factor_dims[k]):
outvec[off + i] = self.kron_array[k, i]
sz = self.factor_dims[k]
# Repeatedly scale & copy the last "sz" elements of outvec forward
# (as many times as there are elements in the current factor array)
# - but multiply *in-place* the last "sz" elements.
for k in range(self.nfactors - 2, -1, -1): # for all but the last factor
off = N - sz * self.factor_dims[k]
endoff = N - sz
#For all but the final element of self.kron_array[k,:],
# multiply & copy the final sz elements of outvec into position
for j in range(self.factor_dims[k] - 1):
mult = self.kron_array[k, j]
for i in range(sz):
outvec[off + i] = mult * outvec[endoff + i]
off += sz
#Last element: in-place mult
#assert(off == endoff)
mult = self.kron_array[k, self.factor_dims[k] - 1]
for i in range(sz):
outvec[endoff + i] *= mult
sz *= self.factor_dims[k]
return outvec
def amplitude(self, state): # allow scratch to be passed in?
scratch = _np.empty(self.dim, complex)
Edense = self.todense(scratch)
return _np.vdot(Edense, state.base)
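#Example (illustrative sketch, not part of the original module): todense() above
# fills `outvec` with the Kronecker product of the rows of kron_array. For two
# single-qubit factors a = [a0, a1] and b = [b0, b1] (factor_dims = [2, 2],
# dim = 4) the result is
#   outvec == [a0*b0, a0*b1, a1*b0, a1*b1]
# i.e. the same as _np.kron(a, b), but built in place without allocating the
# intermediate arrays that _np.kron would.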
class SVEffectRep_Computational(SVEffectRep):
def __init__(self, zvals, dim):
# int dim = 2**len(zvals) -- just send as argument for speed?
assert(dim == 2**len(zvals))
assert(len(zvals) <= 64), "Cannot create a Computational basis rep with >64 qubits!"
# Current storage of computational basis states converts zvals -> 64-bit integer
# Different from the DM counterpart: here each factor has only *one* nonzero
# element, so the final state has only a *single* nonzero element! We just
# have to figure out where that single element lies (compute its index) based
# on the given zvals.
# Assume, like tensorprod, that factor ordering == kron ordering
# so nonzero_index = kron( factor[0], factor[1], ... factor[N-1] ).
base = 2**(len(zvals) - 1)
self.nonzero_index = 0
self.zvals = zvals
for k, v in enumerate(zvals):
assert(v in (0, 1)), "zvals must contain only 0s and 1s"
self.nonzero_index += base * v
base //= 2 # or right shift?
super(SVEffectRep_Computational, self).__init__(dim)
def __reduce__(self):
return (SVEffectRep_Computational, (self.zvals, self.dim))
def todense(self, outvec, trust_outvec_sparsity=False):
# when trust_outvec_sparsity is True, assume we only need to fill in the
# non-zero elements of outvec (i.e. that outvec is already zero wherever
# this vector is zero).
if not trust_outvec_sparsity:
outvec[:] = 0 # reset everything to zero
outvec[self.nonzero_index] = 1.0
return outvec
def amplitude(self, state): # allow scratch to be passed in?
scratch = _np.empty(self.dim, complex)
Edense = self.todense(scratch)
return _np.vdot(Edense, state.base)
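#Example (illustrative sketch, not part of the original module): for
# SVEffectRep_Computational(zvals=(1, 0, 1), dim=8) the constructor loop gives
#   nonzero_index = 1*4 + 0*2 + 1*1 = 5
# which is just the bit string "101" read as an integer, so todense() yields
# the |101> basis vector e_5 of length 8.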
class SVOpRep(object):
def __init__(self, dim):
self.dim = dim
def acton(self, state):
raise NotImplementedError()
def adjoint_acton(self, state):
raise NotImplementedError()
class SVOpRep_Dense(SVOpRep):
def __init__(self, data, reducefix=0):
if reducefix == 0:
self.base = data
else:
# because serialization of numpy array flags is borked (around Numpy v1.16), we need to copy data
# (so self.base *owns* its data) and manually convey the writeable flag.
self.base = _np.require(data.copy(), requirements=['OWNDATA', 'C_CONTIGUOUS'])
self.base.flags.writeable = True if reducefix == 1 else False
super(SVOpRep_Dense, self).__init__(self.base.shape[0])
def __reduce__(self):
reducefix = 1 if self.base.flags.writeable else 2
return (SVOpRep_Dense, (self.base, reducefix))
def acton(self, state):
return SVStateRep(_np.dot(self.base, state.base))
def adjoint_acton(self, state):
return SVStateRep(_np.dot(_np.conjugate(self.base.T), state.base))
def __str__(self):
return "SVOpRep_Dense:\n" + str(self.base)
class SVOpRep_Embedded(SVOpRep):
# exactly the same as DM case
def __init__(self, embedded_op, numBasisEls, actionInds,
blocksizes, embedded_dim, nComponentsInActiveBlock,
iActiveBlock, nBlocks, dim):
self.embedded = embedded_op
self.numBasisEls = numBasisEls
self.actionInds = actionInds
self.blocksizes = blocksizes
numBasisEls_noop_blankaction = numBasisEls.copy()
for i in actionInds: numBasisEls_noop_blankaction[i] = 1
self.basisInds_noop_blankaction = [list(range(n)) for n in numBasisEls_noop_blankaction]
# multipliers to go from per-label indices to tensor-product-block index
# e.g. if map(len,basisInds) == [1,4,4] then multipliers == [ 16 4 1 ]
self.multipliers = _np.array(_np.flipud(_np.cumprod([1] + list(
reversed(list(numBasisEls[1:]))))), _np.int64)
self.basisInds_action = [list(range(numBasisEls[i])) for i in actionInds]
self.embeddedDim = embedded_dim
self.nComponents = nComponentsInActiveBlock
self.iActiveBlock = iActiveBlock
self.nBlocks = nBlocks
self.offset = sum(blocksizes[0:iActiveBlock])
super(SVOpRep_Embedded, self).__init__(dim)
def __reduce__(self):
return (SVOpRep_Embedded, (self.embedded,
self.numBasisEls, self.actionInds,
self.blocksizes, self.embeddedDim,
self.nComponents, self.iActiveBlock,
self.nBlocks, self.dim))
def _acton_other_blocks_trivially(self, output_state, state):
offset = 0
for iBlk, blockSize in enumerate(self.blocksizes):
if iBlk != self.iActiveBlock:
output_state.base[offset:offset + blockSize] = state.base[offset:offset + blockSize] # identity op
offset += blockSize
def acton(self, state):
output_state = SVStateRep(_np.zeros(state.base.shape, complex))
offset = self.offset # if relToBlock else self.offset (relToBlock == False here)
for b in _itertools.product(*self.basisInds_noop_blankaction): # zeros in all action-index locations
vec_index_noop = _np.dot(self.multipliers, tuple(b))
inds = []
for op_b in _itertools.product(*self.basisInds_action):
vec_index = vec_index_noop
for i, bInd in zip(self.actionInds, op_b):
#b[i] = bInd #don't need to do this; just update vec_index:
vec_index += self.multipliers[i] * bInd
inds.append(offset + vec_index)
embedded_instate = SVStateRep(state.base[inds])
embedded_outstate = self.embedded.acton(embedded_instate)
output_state.base[inds] += embedded_outstate.base
#act on other blocks trivially:
self._acton_other_blocks_trivially(output_state, state)
return output_state
def adjoint_acton(self, state):
""" Act the adjoint of this gate map on an input state """
#NOTE: Same as acton except uses 'adjoint_acton(...)' below
output_state = SVStateRep(_np.zeros(state.base.shape, complex))
offset = self.offset # if relToBlock else self.offset (relToBlock == False here)
for b in _itertools.product(*self.basisInds_noop_blankaction): # zeros in all action-index locations
vec_index_noop = _np.dot(self.multipliers, tuple(b))
inds = []
for op_b in _itertools.product(*self.basisInds_action):
vec_index = vec_index_noop
for i, bInd in zip(self.actionInds, op_b):
#b[i] = bInd #don't need to do this; just update vec_index:
vec_index += self.multipliers[i] * bInd
inds.append(offset + vec_index)
embedded_instate = SVStateRep(state.base[inds])
embedded_outstate = self.embedded.adjoint_acton(embedded_instate)
output_state.base[inds] += embedded_outstate.base
#act on other blocks trivially:
self._acton_other_blocks_trivially(output_state, state)
return output_state
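#Example (illustrative sketch, not part of the original module): for a 3-factor
# space with numBasisEls = [2, 2, 2] and an operation embedded on the middle
# factor (actionInds = [1]), multipliers == [4, 2, 1], so a per-factor index
# tuple (b0, b1, b2) maps to the flat index 4*b0 + 2*b1 + 1*b2. The acton loop
# iterates the "noop" factors (b0, b2), gathers the state amplitudes at the
# indices obtained by varying b1, applies the embedded op to that sub-vector,
# and scatters the result back into the output state.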
class SVOpRep_Composed(SVOpRep):
# exactly the same as DM case
def __init__(self, factor_op_reps, dim):
#assert(len(factor_op_reps) > 0), "Composed gates must contain at least one factor gate!"
self.factor_reps = factor_op_reps
super(SVOpRep_Composed, self).__init__(dim)
def __reduce__(self):
return (SVOpRep_Composed, (self.factor_reps, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
for gate in self.factor_reps:
state = gate.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for gate in reversed(self.factor_reps):
state = gate.adjoint_acton(state)
return state
def reinit_factor_op_reps(self, new_factor_op_reps):
self.factor_reps = new_factor_op_reps
class SVOpRep_Sum(SVOpRep):
# exactly the same as DM case
def __init__(self, factor_reps, dim):
#assert(len(factor_reps) > 0), "Composed gates must contain at least one factor gate!"
self.factor_reps = factor_reps
super(SVOpRep_Sum, self).__init__(dim)
def __reduce__(self):
return (SVOpRep_Sum, (self.factor_reps, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
output_state = SVStateRep(_np.zeros(state.base.shape, complex))
for f in self.factor_reps:
output_state.base += f.acton(state).base
return output_state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
output_state = SVStateRep(_np.zeros(state.base.shape, complex))
for f in self.factor_reps:
output_state.base += f.adjoint_acton(state).base
return output_state
class SVOpRep_Exponentiated(SVOpRep):
def __init__(self, exponentiated_op_rep, power, dim):
self.exponentiated_op = exponentiated_op_rep
self.power = power
super(SVOpRep_Exponentiated, self).__init__(dim)
def __reduce__(self):
return (SVOpRep_Exponentiated, (self.exponentiated_op, self.power, self.dim))
def acton(self, state):
""" Act this gate map on an input state """
for i in range(self.power):
state = self.exponentiated_op.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for i in range(self.power):
state = self.exponentiated_op.adjoint_acton(state)
return state
# Stabilizer state (SB) propagation wrapper classes
class SBStateRep(object):
def __init__(self, smatrix, pvectors, amps):
from ..stabilizer import StabilizerFrame as _StabilizerFrame
self.sframe = _StabilizerFrame(smatrix, pvectors, amps)
# just rely on StabilizerFrame class to do all the heavy lifting...
def __reduce__(self):
return (SBStateRep, (self.sframe.s, self.sframe.ps, self.sframe.a))
@property
def smatrix(self):
return self.sframe.s
@property
def pvectors(self):
return self.sframe.ps
@property
def amps(self):
return self.sframe.a
@property
def nqubits(self):
return self.sframe.n
@property
def dim(self):
return 2**self.nqubits # assume "unitary evolution"-type mode
def copy(self):
cpy = SBStateRep(_np.zeros((0, 0), _np.int64), None, None) # makes a dummy cpy.sframe
cpy.sframe = self.sframe.copy() # a legit copy *with* qubit filters copied too
return cpy
def __str__(self):
return "SBStateRep:\n" + str(self.sframe)
class SBEffectRep(object):
def __init__(self, zvals):
self.zvals = zvals
def __reduce__(self):
return (SBEffectRep, (self.zvals,))
@property
def nqubits(self):
return len(self.zvals)
@property
def dim(self):
return 2**self.nqubits # assume "unitary evolution"-type mode
def probability(self, state):
return state.sframe.measurement_probability(self.zvals, check=True) # use check for now?
def amplitude(self, state):
return state.sframe.extract_amplitude(self.zvals)
class SBOpRep(object):
def __init__(self, n):
self.n = n # number of qubits
def acton(self, state):
raise NotImplementedError()
def adjoint_acton(self, state):
raise NotImplementedError()
@property
def nqubits(self):
return self.n
@property
def dim(self):
return 2**(self.n) # assume "unitary evolution"-type mode
class SBOpRep_Embedded(SBOpRep):
def __init__(self, embedded_op, n, qubits):
self.embedded = embedded_op
self.qubits = qubits # qubit *indices*
super(SBOpRep_Embedded, self).__init__(n)
def __reduce__(self):
return (SBOpRep_Embedded, (self.embedded, self.n, self.qubits))
def acton(self, state):
state = state.copy() # needed?
state.sframe.push_view(self.qubits)
outstate = self.embedded.acton(state) # works b/c sframe has "view filters"
state.sframe.pop_view() # return input state to original view
outstate.sframe.pop_view()
return outstate
def adjoint_acton(self, state):
state = state.copy() # needed?
state.sframe.push_view(self.qubits)
outstate = self.embedded.adjoint_acton(state) # works b/c sframe has "view filters"
state.sframe.pop_view() # return input state to original view
outstate.sframe.pop_view()
return outstate
class SBOpRep_Composed(SBOpRep):
# exactly the same as DM case except .dim -> .n
def __init__(self, factor_op_reps, n):
#assert(len(factor_op_reps) > 0), "Composed gates must contain at least one factor gate!"
self.factor_reps = factor_op_reps
super(SBOpRep_Composed, self).__init__(n)
def __reduce__(self):
return (SBOpRep_Composed, (self.factor_reps, self.n))
def acton(self, state):
""" Act this gate map on an input state """
for gate in self.factor_reps:
state = gate.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for gate in reversed(self.factor_reps):
state = gate.adjoint_acton(state)
return state
class SBOpRep_Sum(SBOpRep):
# exactly the same as DM case except .dim -> .n
def __init__(self, factor_reps, n):
#assert(len(factor_reps) > 0), "Composed gates must contain at least one factor gate!"
self.factor_reps = factor_reps
super(SBOpRep_Sum, self).__init__(n)
def __reduce__(self):
return (SBOpRep_Sum, (self.factor_reps, self.n))
def acton(self, state):
""" Act this gate map on an input state """
# need further stabilizer frame support to represent the sum of stabilizer states
raise NotImplementedError()
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
# need further stabilizer frame support to represent the sum of stabilizer states
raise NotImplementedError()
class SBOpRep_Exponentiated(SBOpRep):
def __init__(self, exponentiated_op_rep, power, n):
self.exponentiated_op = exponentiated_op_rep
self.power = power
super(SBOpRep_Exponentiated, self).__init__(n)
def __reduce__(self):
return (SBOpRep_Exponentiated, (self.exponentiated_op, self.power, self.n))
def acton(self, state):
""" Act this gate map on an input state """
for i in range(self.power):
state = self.exponentiated_op.acton(state)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
for i in range(self.power):
state = self.exponentiated_op.adjoint_acton(state)
return state
class SBOpRep_Clifford(SBOpRep):
def __init__(self, smatrix, svector, smatrix_inv, svector_inv, unitary):
self.smatrix = smatrix
self.svector = svector
self.smatrix_inv = smatrix_inv
self.svector_inv = svector_inv
self.unitary = unitary
super(SBOpRep_Clifford, self).__init__(smatrix.shape[0] // 2)
def __reduce__(self):
return (SBOpRep_Clifford, (self.smatrix, self.svector, self.smatrix_inv, self.svector_inv, self.unitary))
@property
def unitary_dagger(self):
return _np.conjugate(self.unitary.T)
def acton(self, state):
""" Act this gate map on an input state """
state = state.copy() # (copies any qubit filters in .sframe too)
state.sframe.clifford_update(self.smatrix, self.svector, self.unitary)
return state
def adjoint_acton(self, state):
""" Act the adjoint of this operation matrix on an input state """
# Note: cliffords are unitary, so adjoint == inverse
state = state.copy() # (copies any qubit filters in .sframe too)
state.sframe.clifford_update(self.smatrix_inv, self.svector_inv,
_np.conjugate(self.unitary.T))
return state
# Other classes
class PolyRep(dict):
"""
Representation class for a polynomial.
This is similar to a full Polynomial
dictionary, but lacks some functionality and is optimized for computation
speed. In particular, the keys of this dict are not tuples of variable
indices (as in Polynomial) but simple integers encoded from such tuples.
To perform this mapping, one must specify a maximum order and number of
variables.
"""
def __init__(self, int_coeff_dict, max_num_vars, vindices_per_int):
"""
Create a new PolyRep object.
Parameters
----------
int_coeff_dict : dict
A dictionary of coefficients whose keys are already-encoded
integers corresponding to variable-index-tuples (i.e. poly
terms).
max_num_vars : int
The maximum number of variables allowed. For example, if
set to 2, then only "x0" and "x1" are allowed to appear
in terms.
vindices_per_int : int
The number of variable indices that are packed into a single
integer key.
"""
self.max_num_vars = max_num_vars
self.vindices_per_int = vindices_per_int
super(PolyRep, self).__init__()
if int_coeff_dict is not None:
self.update(int_coeff_dict)
def reinit(self, int_coeff_dict):
""" TODO: docstring """
self.clear()
self.update(int_coeff_dict)
def mapvec_indices_inplace(self, mapfn_as_vector):
new_items = {}
for k, v in self.items():
new_vinds = tuple((mapfn_as_vector[j] for j in self._int_to_vinds(k)))
new_items[self._vinds_to_int(new_vinds)] = v
self.clear()
self.update(new_items)
def copy(self):
"""
Make a copy of this polynomial representation.
Returns
-------
PolyRep
"""
return PolyRep(self, self.max_num_vars, self.vindices_per_int) # construct expects "int" keys
def abs(self):
"""
Return a polynomial whose coefficients are the absolute values of this PolyRep's coefficients.
Returns
-------
PolyRep
"""
result = {k: abs(v) for k, v in self.items()}
return PolyRep(result, self.max_num_vars, self.vindices_per_int)
@property
def int_coeffs(self): # so we can convert back to python Polys
""" The coefficient dictionary (with encoded integer keys) """
return dict(self) # for compatibility w/C case which can't derive from dict...
#UNUSED TODO REMOVE
#def map_indices_inplace(self, mapfn):
# """
# Map the variable indices in this `PolyRep`.
# This allows one to change the "labels" of the variables.
#
# Parameters
# ----------
# mapfn : function
# A single-argument function that maps old variable-index tuples
# to new ones. E.g. `mapfn` might map `(0,1)` to `(10,11)` if
# we were increasing each variable index by 10.
#
# Returns
# -------
# None
# """
# new_items = {self._vinds_to_int(mapfn(self._int_to_vinds(k))): v
# for k, v in self.items()}
# self.clear()
# self.update(new_items)
#
#def set_maximums(self, max_num_vars=None):
# """
# Alter the maximum order and number of variables (and hence the
# tuple-to-int mapping) for this polynomial representation.
#
# Parameters
# ----------
# max_num_vars : int
# The maximum number of variables allowed.
#
# Returns
# -------
# None
# """
# coeffs = {self._int_to_vinds(k): v for k, v in self.items()}
# if max_num_vars is not None: self.max_num_vars = max_num_vars
# int_coeffs = {self._vinds_to_int(k): v for k, v in coeffs.items()}
# self.clear()
# self.update(int_coeffs)
def _vinds_to_int(self, vinds):
""" Maps tuple of variable indices to encoded int """
ints_in_key = int(_np.ceil(len(vinds) / self.vindices_per_int))
ret_tup = []
for k in range(ints_in_key):
ret = 0; m = 1
# last tuple index is most significant
for i in vinds[k * self.vindices_per_int:(k + 1) * self.vindices_per_int]:
assert(i < self.max_num_vars), "Variable index exceeds maximum!"
ret += (i + 1) * m
m *= self.max_num_vars + 1
assert(ret >= 0), "vinds = %s -> %d!!" % (str(vinds), ret)
ret_tup.append(ret)
return tuple(ret_tup)
def _int_to_vinds(self, indx_tup):
""" Maps encoded "int" to tuple of variable indices """
ret = []
#DB: cnt = 0; orig = indx
for indx in indx_tup:
while indx != 0:
nxt = indx // (self.max_num_vars + 1)
i = indx - nxt * (self.max_num_vars + 1)
ret.append(i - 1)
indx = nxt
#DB: cnt += 1
#DB: if cnt > 50: print("VINDS iter %d - indx=%d (orig=%d, nv=%d)" % (cnt,indx,orig,self.max_num_vars))
return tuple(sorted(ret))
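#Example (illustrative sketch, not part of the original module): with
# max_num_vars=4 and vindices_per_int=2, the variable-index tuple (0, 2)
# encodes in base (max_num_vars + 1) = 5:
#   p = PolyRep(None, 4, 2)
#   key = p._vinds_to_int((0, 2))   # (0+1)*1 + (2+1)*5 = 16  ->  key == (16,)
#   p._int_to_vinds(key)            # -> (0, 2)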
#UNUSED TODO REMOVE
#def deriv(self, wrtParam):
# """
# Take the derivative of this polynomial representation with respect to
# the single variable `wrtParam`.
#
# Parameters
# ----------
# wrtParam : int
# The variable index to differentiate with respect to (can be
# 0 to the `max_num_vars-1` supplied to `__init__`.
#
# Returns
# -------
# PolyRep
# """
# dcoeffs = {}
# for i, coeff in self.items():
# ivar = self._int_to_vinds(i)
# cnt = float(ivar.count(wrtParam))
# if cnt > 0:
# l = list(ivar)
# del l[ivar.index(wrtParam)]
# dcoeffs[tuple(l)] = cnt * coeff
# int_dcoeffs = {self._vinds_to_int(k): v for k, v in dcoeffs.items()}
# return PolyRep(int_dcoeffs, self.max_num_vars, self.vindices_per_int)
#def evaluate(self, variable_values):
# """
# Evaluate this polynomial at the given variable values.
#
# Parameters
# ----------
# variable_values : iterable
# The values each variable will be evaluated at. Must have
# length at least equal to the number of variables present
# in this `PolyRep`.
#
# Returns
# -------
# float or complex
# """
# #FUTURE and make this function smarter (Russian peasant)?
# ret = 0
# for i, coeff in self.items():
# ivar = self._int_to_vinds(i)
# ret += coeff * _np.product([variable_values[i] for i in ivar])
# return ret
def compact_complex(self):
"""
Returns a compact representation of this polynomial as a
`(variable_tape, coefficient_tape)` 2-tuple of 1D numpy arrays.
The coefficient tape is *always* a complex array, even if
none of the polynomial's coefficients are complex.
Such compact representations are useful for storage and later
evaluation, but not suited to polynomial manipulation.
Returns
-------
vtape : numpy.ndarray
A 1D array of integers (variable indices).
ctape : numpy.ndarray
A 1D array of *complex* coefficients.
"""
nTerms = len(self)
vinds = {i: self._int_to_vinds(i) for i in self.keys()}
nVarIndices = sum(map(len, vinds.values()))
vtape = _np.empty(1 + nTerms + nVarIndices, _np.int64) # "variable" tape
ctape = _np.empty(nTerms, complex) # "coefficient tape"
i = 0
vtape[i] = nTerms; i += 1
for iTerm, k in enumerate(sorted(self.keys())):
v = vinds[k] # so don't need to compute self._int_to_vinds(k)
l = len(v)
ctape[iTerm] = self[k]
vtape[i] = l; i += 1
vtape[i:i + l] = v; i += l
assert(i == len(vtape)), "Logic Error!"
return vtape, ctape
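#Example (illustrative sketch, not part of the original module): for a PolyRep
# representing 2.0*x1 + 3.0*x0^2 (max_num_vars=4, vindices_per_int=2),
# compact_complex() returns
#   vtape == [2,  1, 1,  2, 0, 0]   # nTerms, then (len, vinds...) per term
#   ctape == [2.0+0j, 3.0+0j]
# where terms are ordered by their (sorted) encoded keys.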
def compact_real(self):
"""
Returns a compact representation of this polynomial as a
`(variable_tape, coefficient_tape)` 2-tuple of 1D numpy arrays.
The coefficient tape here is a *real* array; any imaginary parts of
this polynomial's coefficients are discarded.
Such compact representations are useful for storage and later
evaluation, but not suited to polynomial manipulation.
Returns
-------
vtape : numpy.ndarray
A 1D array of integers (variable indices).
ctape : numpy.ndarray
A 1D array of *real* coefficients.
"""
nTerms = len(self)
vinds = {i: self._int_to_vinds(i) for i in self.keys()}
nVarIndices = sum(map(len, vinds.values()))
vtape = _np.empty(1 + nTerms + nVarIndices, _np.int64) # "variable" tape
ctape = _np.empty(nTerms, 'd') # "coefficient tape" (real-valued)
i = 0
vtape[i] = nTerms; i += 1
for iTerm, k in enumerate(sorted(self.keys())):
v = vinds[k] # so don't need to compute self._int_to_vinds(k)
l = len(v)
ctape[iTerm] = _np.real(self[k])  # imaginary parts are discarded here
vtape[i] = l; i += 1
vtape[i:i + l] = v; i += l
assert(i == len(vtape)), "Logic Error!"
return vtape, ctape
def mult(self, x):
"""
Returns `self * x` where `x` is another polynomial representation.
Parameters
----------
x : PolyRep
Returns
-------
PolyRep
"""
assert(self.max_num_vars == x.max_num_vars)
newpoly = PolyRep(None, self.max_num_vars, self.vindices_per_int)
for k1, v1 in self.items():
for k2, v2 in x.items():
inds = sorted(self._int_to_vinds(k1) + x._int_to_vinds(k2))
k = newpoly._vinds_to_int(inds)
if k in newpoly: newpoly[k] += v1 * v2
else: newpoly[k] = v1 * v2
assert(newpoly.degree() <= self.degree() + x.degree())
return newpoly
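#Example (illustrative sketch, not part of the original module): multiplying
# p = 2.0*x0 by q = 3.0*x1 merges the decoded variable-index tuples and
# re-encodes the result:
#   p = PolyRep(None, 4, 2); p[p._vinds_to_int((0,))] = 2.0
#   q = PolyRep(None, 4, 2); q[q._vinds_to_int((1,))] = 3.0
#   r = p.mult(q)      # single term 6.0*x0*x1; str(r) == "6.000x0x1"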
def scale(self, x):
"""
Performs `self = self * x` where `x` is a scalar.
Parameters
----------
x : float or complex
Returns
-------
None
"""
# assume a scalar that can multiply values
for k in self:
self[k] *= x
def add_inplace(self, other):
"""
Adds `other` into this PolyRep.
Parameters
----------
other : PolyRep
Returns
-------
PolyRep
"""
for k, v in other.items():
try:
self[k] += v
except KeyError:
self[k] = v
return self
def add_scalar_to_all_coeffs_inplace(self, x):
"""
Adds `x` to all of the coefficients in this PolyRep.
Parameters
----------
x : float or complex
Returns
-------
PolyRep
"""
for k in self:
self[k] += x
return self
#UNUSED TODO REMOVE
#def scalar_mult(self, x):
# """
# Returns `self * x` where `x` is a scalar.
#
# Parameters
# ----------
# x : float or complex
#
# Returns
# -------
# PolyRep
# """
# # assume a scalar that can multiply values
# newpoly = self.copy()
# newpoly.scale(x)
# return newpoly
def __str__(self):
def fmt(x):
if abs(_np.imag(x)) > 1e-6:
if abs(_np.real(x)) > 1e-6: return "(%.3f+%.3fj)" % (x.real, x.imag)
else: return "(%.3fj)" % x.imag
else: return "%.3f" % x.real
termstrs = []
sorted_keys = sorted(list(self.keys()))
for k in sorted_keys:
vinds = self._int_to_vinds(k)
varstr = ""; last_i = None; n = 0
for i in sorted(vinds):
if i == last_i: n += 1
elif last_i is not None:
varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "")
last_i = i
if last_i is not None:
varstr += "x%d%s" % (last_i, ("^%d" % n) if n > 1 else "")
#print("DB: vinds = ",vinds, " varstr = ",varstr)
if abs(self[k]) > 1e-4:
termstrs.append("%s%s" % (fmt(self[k]), varstr))
if len(termstrs) > 0:
return " + ".join(termstrs)
else: return "0"
def __repr__(self):
return "PolyRep[ " + str(self) + " ]"
def degree(self):
""" Used for debugging in slowreplib routines only"""
return max([len(self._int_to_vinds(k)) for k in self.keys()])
#UNUSED TODO REMOVE
#def __add__(self, x):
# newpoly = self.copy()
# if isinstance(x, PolyRep):
# assert(self.max_num_vars == x.max_num_vars)
# for k, v in x.items():
# if k in newpoly: newpoly[k] += v
# else: newpoly[k] = v
# else: # assume a scalar that can be added to values
# for k in newpoly:
# newpoly[k] += x
# return newpoly
#
#def __mul__(self, x):
# if isinstance(x, PolyRep):
# return self.mult_poly(x)
# else: # assume a scalar that can multiply values
# return self.mult_scalar(x)
#
#def __rmul__(self, x):
# return self.__mul__(x)
#
#def __pow__(self, n):
# ret = PolyRep({0: 1.0}, self.max_num_vars, self.vindices_per_int)
# cur = self
# for i in range(int(_np.floor(_np.log2(n))) + 1):
# rem = n % 2 # gets least significant bit (i-th) of n
# if rem == 1: ret *= cur # add current power of x (2^i) if needed
# cur = cur * cur # current power *= 2
# n //= 2 # shift bits of n right
# return ret
#
#def __copy__(self):
# return self.copy()
#
#def debug_report(self):
# actual_max_order = max([len(self._int_to_vinds(k)) for k in self.keys()])
# return "PolyRep w/max_vars=%d: nterms=%d, actual max-order=%d" % \
# (self.max_num_vars, len(self), actual_max_order)
#
class SVTermRep(object):
# just a container for other reps (polys, states, effects, and gates)
@classmethod
def composed(cls, terms_to_compose, magnitude):
logmag = _math.log10(magnitude) if magnitude > 0 else -LARGE
first = terms_to_compose[0]
coeffrep = first.coeff
pre_ops = first.pre_ops[:]
post_ops = first.post_ops[:]
for t in terms_to_compose[1:]:
coeffrep = coeffrep.mult(t.coeff)
pre_ops += t.pre_ops
post_ops += t.post_ops
return SVTermRep(coeffrep, magnitude, logmag, first.pre_state, first.post_state,
first.pre_effect, first.post_effect, pre_ops, post_ops)
def __init__(self, coeff, mag, logmag, pre_state, post_state,
pre_effect, post_effect, pre_ops, post_ops):
self.coeff = coeff
self.magnitude = mag
self.logmagnitude = logmag
self.pre_state = pre_state
self.post_state = post_state
self.pre_effect = pre_effect
self.post_effect = post_effect
self.pre_ops = pre_ops
self.post_ops = post_ops
def set_magnitude(self, mag):
self.magnitude = mag
self.logmagnitude = _math.log10(mag) if mag > 0 else -LARGE
def set_magnitude_only(self, mag):
self.magnitude = mag
def mapvec_indices_inplace(self, mapvec):
self.coeff.mapvec_indices_inplace(mapvec)
def scalar_mult(self, x):
coeff = self.coeff.copy()
coeff.scale(x)
return SVTermRep(coeff, self.magnitude, self.logmagnitude,
self.pre_state, self.post_state, self.pre_effect, self.post_effect,
self.pre_ops, self.post_ops)
def copy(self):
return SVTermRep(self.coeff.copy(), self.magnitude, self.logmagnitude,
self.pre_state, self.post_state, self.pre_effect, self.post_effect,
self.pre_ops, self.post_ops)
class SBTermRep(object):
# essentially the same as SVTermRep (it lacks only set_magnitude_only)
# just a container for other reps (polys, states, effects, and gates)
@classmethod
def composed(cls, terms_to_compose, magnitude):
logmag = _math.log10(magnitude) if magnitude > 0 else -LARGE
first = terms_to_compose[0]
coeffrep = first.coeff
pre_ops = first.pre_ops[:]
post_ops = first.post_ops[:]
for t in terms_to_compose[1:]:
coeffrep = coeffrep.mult(t.coeff)
pre_ops += t.pre_ops
post_ops += t.post_ops
return SBTermRep(coeffrep, magnitude, logmag, first.pre_state, first.post_state,
first.pre_effect, first.post_effect, pre_ops, post_ops)
def __init__(self, coeff, mag, logmag, pre_state, post_state,
pre_effect, post_effect, pre_ops, post_ops):
self.coeff = coeff
self.magnitude = mag
self.logmagnitude = logmag
self.pre_state = pre_state
self.post_state = post_state
self.pre_effect = pre_effect
self.post_effect = post_effect
self.pre_ops = pre_ops
self.post_ops = post_ops
def set_magnitude(self, mag):
self.magnitude = mag
self.logmagnitude = _math.log10(mag) if mag > 0 else -LARGE
def mapvec_indices_inplace(self, mapvec):
self.coeff.mapvec_indices_inplace(mapvec)
def scalar_mult(self, x):
coeff = self.coeff.copy()
coeff.scale(x)
return SBTermRep(coeff, self.magnitude, self.logmagnitude,
self.pre_state, self.post_state, self.pre_effect, self.post_effect,
self.pre_ops, self.post_ops)
def copy(self):
return SBTermRep(self.coeff.copy(), self.magnitude, self.logmagnitude,
self.pre_state, self.post_state, self.pre_effect, self.post_effect,
self.pre_ops, self.post_ops)
# No need to create separate classes for floating-pt (vs. polynomial) coeff in Python (no types!)
SVTermDirectRep = SVTermRep
SBTermDirectRep = SBTermRep
## END CLASSES -- BEGIN CALC METHODS
def propagate_staterep(staterep, operationreps):
ret = staterep
for oprep in operationreps:
ret = oprep.acton(ret)
return ret
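#Example (illustrative sketch, not part of the original module): reps compose
# by repeated acton() calls, e.g. propagating |0> through two bit-flips:
#   rho = SVStateRep(_np.array([1, 0], complex))
#   X = SVOpRep_Dense(_np.array([[0, 1], [1, 0]], complex))
#   out = propagate_staterep(rho, [X, X])   # out.base == [1, 0] again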
def DM_mapfill_probs_block(calc, mxToFill, dest_indices, evalTree, comm):
dest_indices = _slct.as_array(dest_indices) # make sure this is an array and not a slice
cacheSize = evalTree.cache_size()
#Create rhoCache
rho_cache = [None] * cacheSize # so we can store (s,p) tuples in cache
#Get operationreps and ereps now so we don't make unnecessary ._rep references
rhoreps = {rholbl: calc._rho_from_label(rholbl)._rep for rholbl in evalTree.rholabels}
operationreps = {gl: calc.sos.get_operation(gl)._rep for gl in evalTree.opLabels}
effectreps = {i: E._rep for i, E in enumerate(calc._Es_from_labels(evalTree.elabels))} # cache these in future
#comm is currently ignored
#TODO: if evalTree is split, distribute among processors
for i in evalTree.get_evaluation_order():
iStart, remainder, iCache = evalTree[i]
if iStart is None: # then first element of remainder is a state prep label
rholabel = remainder[0]
init_state = rhoreps[rholabel]
remainder = remainder[1:]
else:
init_state = rho_cache[iStart] # [:,None]
#OLD final_state = self.propagate_state(init_state, remainder)
final_state = propagate_staterep(init_state, [operationreps[gl] for gl in remainder])
if iCache is not None: rho_cache[iCache] = final_state # [:,0] #store this state in the cache
ereps = [effectreps[j] for j in evalTree.eLbl_indices_per_circuit[i]]
final_indices = [dest_indices[j] for j in evalTree.final_indices_per_circuit[i]]
for j, erep in zip(final_indices, ereps):
mxToFill[j] = erep.probability(final_state) # outcome probability
def DM_mapfill_dprobs_block(calc, mxToFill, dest_indices, dest_param_indices, evalTree, param_indices, comm):
eps = 1e-7 # hardcoded?
if param_indices is None:
param_indices = list(range(calc.Np))
if dest_param_indices is None:
dest_param_indices = list(range(_slct.length(param_indices)))
param_indices = _slct.as_array(param_indices)
dest_param_indices = _slct.as_array(dest_param_indices)
all_slices, my_slice, owners, subComm = \
_mpit.distribute_slice(slice(0, len(param_indices)), comm)
my_param_indices = param_indices[my_slice]
st = my_slice.start # beginning of where my_param_indices results
# get placed into dpr_cache
#Get a map from global parameter indices to the desired
# final index within mxToFill (fpoffset = final parameter offset)
iParamToFinal = {i: dest_param_indices[st + ii] for ii, i in enumerate(my_param_indices)}
nEls = evalTree.num_final_elements()
probs = _np.empty(nEls, 'd')
probs2 = _np.empty(nEls, 'd')
DM_mapfill_probs_block(calc, probs, slice(0, nEls), evalTree, comm)
orig_vec = calc.to_vector().copy()
for i in range(calc.Np):
#print("dprobs cache %d of %d" % (i,self.Np))
if i in iParamToFinal:
iFinal = iParamToFinal[i]
vec = orig_vec.copy(); vec[i] += eps
calc.from_vector(vec, close=True)
DM_mapfill_probs_block(calc, probs2, slice(0, nEls), evalTree, subComm)
_fas(mxToFill, [dest_indices, iFinal], (probs2 - probs) / eps)
calc.from_vector(orig_vec, close=True)
#Now each processor has filled the relevant parts of mxToFill, so gather together:
_mpit.gather_slices(all_slices, owners, mxToFill, [], axes=1, comm=comm)
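#Note (descriptive comment, not part of the original module): the loop above
# forms one-sided finite-difference derivatives,
#   dP/dtheta_i ~= (P(theta + eps*e_i) - P(theta)) / eps   with eps = 1e-7,
# recomputing the full probability block once per perturbed parameter and then
# gathering the per-parameter columns across MPI ranks.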
def DM_mapfill_TDchi2_terms(calc, mxToFill, dest_indices, num_outcomes, evalTree, dataset_rows,
minProbClipForWeighting, probClipInterval, comm):
def obj_fn(p, f, Ni, N, omitted_p):
cp = _np.clip(p, minProbClipForWeighting, 1 - minProbClipForWeighting)
v = (p - f) * _np.sqrt(N / cp)
if omitted_p != 0:
# if this is the *last* outcome at this time then account for any omitted probability
omitted_cp = _np.clip(omitted_p, minProbClipForWeighting, 1 - minProbClipForWeighting)
v = _np.sqrt(v**2 + N * omitted_p**2 / omitted_cp)
return v # sqrt(the objective function term) (the qty stored in cache)
return DM_mapfill_TDterms(calc, obj_fn, mxToFill, dest_indices, num_outcomes, evalTree, dataset_rows, comm)
def DM_mapfill_TDloglpp_terms(calc, mxToFill, dest_indices, num_outcomes, evalTree, dataset_rows,
minProbClip, radius, probClipInterval, comm):
min_p = minProbClip; a = radius
def obj_fn(p, f, Ni, N, omitted_p):
pos_p = max(p, min_p)
if Ni != 0:
freq_term = Ni * (_np.log(f) - 1.0)
else:
freq_term = 0.0
S = -Ni / min_p + N
S2 = 0.5 * Ni / (min_p**2)
v = freq_term + -Ni * _np.log(pos_p) + N * pos_p # dims K x M (K = nSpamLabels, M = nCircuits)
# remove small negative elements due to roundoff error (above expression *cannot* really be negative)
v = max(v, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
if p < min_p:
v = v + S * (p - min_p) + S2 * (p - min_p)**2
if Ni == 0:
if p >= a:
v = N * p
else:
v = N * ((-1.0 / (3 * a**2)) * p**3 + p**2 / a + a / 3.0)
# special handling for f == 0 terms
# using quadratic rounding of function with minimum: max(0,(a-p)^2)/(2a) + p
if omitted_p != 0.0:
# if this is the *last* outcome at this time then account for any omitted probability
v += N * omitted_p if omitted_p >= a else \
N * ((-1.0 / (3 * a**2)) * omitted_p**3 + omitted_p**2 / a + a / 3.0)
return v # objective function term (the qty stored in cache)
return DM_mapfill_TDterms(calc, obj_fn, mxToFill, dest_indices, num_outcomes, evalTree, dataset_rows, comm)
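#Note (descriptive comment, not part of the original module): obj_fn above is
# the per-outcome "Poisson picture" log-likelihood term, roughly
#   N_i*(log f - 1) - N_i*log(p) + N*p,
# extrapolated quadratically below min_p (via S and S2) and replaced by a
# smooth cubic for outcomes with N_i == 0 so the objective stays finite and
# differentiable as p -> 0.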
def DM_mapfill_TDterms(calc, objfn, mxToFill, dest_indices, num_outcomes, evalTree, dataset_rows, comm):
dest_indices = _slct.as_array(dest_indices) # make sure this is an array and not a slice
cacheSize = evalTree.cache_size()
EVecs = calc._Es_from_labels(evalTree.elabels)
elabels_as_outcomes = [(_gt.eLabelToOutcome(e),) for e in evalTree.elabels]
outcome_to_elabel_index = {outcome: i for i, outcome in enumerate(elabels_as_outcomes)}
assert(cacheSize == 0) # so all elements have None as start and remainder[0] is a prep label
#if clipTo is not None:
# _np.clip(mxToFill, clipTo[0], clipTo[1], out=mxToFill) # in-place clip
mxToFill[dest_indices] = 0.0 # reset destination (we sum into it)
#comm is currently ignored
#TODO: if evalTree is split, distribute among processors
for i in evalTree.get_evaluation_order():
iStart, remainder, iCache = evalTree[i]
assert(iStart is None), "Cannot use trees with max-cache-size > 0 when performing time-dependent calcs!"
rholabel = remainder[0]; remainder = remainder[1:]
rhoVec = calc._rho_from_label(rholabel)
datarow = dataset_rows[i]
nTotOutcomes = num_outcomes[i]
totalCnts = {} # TODO defaultdict?
lastInds = {}; outcome_cnts = {}
# consolidate multiple outcomes that occur at same time? or sort?
for k, (t0, Nreps) in enumerate(zip(datarow.time, datarow.reps)):
if t0 in totalCnts:
totalCnts[t0] += Nreps; outcome_cnts[t0] += 1
else:
totalCnts[t0] = Nreps; outcome_cnts[t0] = 1
lastInds[t0] = k
elbl_indices = evalTree.eLbl_indices_per_circuit[i]
final_indices = [dest_indices[j] for j in evalTree.final_indices_per_circuit[i]]
elbl_to_final_index = {elbl_index: final_index for elbl_index, final_index in zip(elbl_indices, final_indices)}
cur_probtotal = 0; last_t = 0
# consolidate multiple outcomes that occur at same time? or sort?
for k, (t0, Nreps, outcome) in enumerate(zip(datarow.time, datarow.reps, datarow.outcomes)):
t = t0
rhoVec.set_time(t)
rho = rhoVec._rep
t += rholabel.time
for gl in remainder:
op = calc.sos.get_operation(gl)
op.set_time(t); t += gl.time # time in gate label == gate duration?
rho = op._rep.acton(rho)
j = outcome_to_elabel_index[outcome]
E = EVecs[j]; E.set_time(t)
p = E._rep.probability(rho) # outcome probability
N = totalCnts[t0]
f = Nreps / N
if t0 == last_t:
cur_probtotal += p
else:
last_t = t0
cur_probtotal = p
omitted_p = 1.0 - cur_probtotal if (lastInds[t0] == k and outcome_cnts[t0] < nTotOutcomes) else 0.0
# and cur_probtotal < 1.0?
mxToFill[elbl_to_final_index[j]] += objfn(p, f, Nreps, N, omitted_p)
def DM_mapfill_TDdchi2_terms(calc, mxToFill, dest_indices, dest_param_indices, num_outcomes, evalTree, dataset_rows,
minProbClipForWeighting, probClipInterval, wrtSlice, comm):
def fillfn(mxToFill, dest_indices, n_outcomes, evTree, dataset_rows, fillComm):
DM_mapfill_TDchi2_terms(calc, mxToFill, dest_indices, n_outcomes,
evTree, dataset_rows, minProbClipForWeighting, probClipInterval, fillComm)
return DM_mapfill_timedep_dterms(calc, mxToFill, dest_indices, dest_param_indices,
num_outcomes, evalTree, dataset_rows, fillfn, wrtSlice, comm)
def DM_mapfill_TDdloglpp_terms(calc, mxToFill, dest_indices, dest_param_indices, num_outcomes,
evalTree, dataset_rows, minProbClip, radius, probClipInterval, wrtSlice, comm):
def fillfn(mxToFill, dest_indices, n_outcomes, evTree, dataset_rows, fillComm):
DM_mapfill_TDloglpp_terms(calc, mxToFill, dest_indices, n_outcomes,
evTree, dataset_rows, minProbClip, radius, probClipInterval, fillComm)
return DM_mapfill_timedep_dterms(calc, mxToFill, dest_indices, dest_param_indices,
num_outcomes, evalTree, dataset_rows, fillfn, wrtSlice, comm)
def DM_mapfill_timedep_dterms(calc, mxToFill, dest_indices, dest_param_indices, num_outcomes, evalTree,
dataset_rows, fillfn, wrtSlice, comm):
eps = 1e-7 # hardcoded?
#Compute finite difference derivatives, one parameter at a time.
param_indices = range(calc.Np) if (wrtSlice is None) else _slct.indices(wrtSlice)
nEls = evalTree.num_final_elements()
vals = _np.empty(nEls, 'd')
vals2 = _np.empty(nEls, 'd')
assert(evalTree.cache_size() == 0) # so all elements have None as start and remainder[0] is a prep label
fillfn(vals, slice(0, nEls), num_outcomes, evalTree, dataset_rows, comm)
all_slices, my_slice, owners, subComm = \
_mpit.distribute_slice(slice(0, len(param_indices)), comm)
my_param_indices = param_indices[my_slice]
st = my_slice.start # beginning of where my_param_indices results
# get placed into dpr_cache
#Get a map from global parameter indices to the desired
# final index within dpr_cache
iParamToFinal = {i: st + ii for ii, i in enumerate(my_param_indices)}
orig_vec = calc.to_vector().copy()
for i in range(calc.Np):
#print("dprobs cache %d of %d" % (i,calc.Np))
if i in iParamToFinal:
iFinal = iParamToFinal[i]
vec = orig_vec.copy(); vec[i] += eps
calc.from_vector(vec, close=True)
fillfn(vals2, slice(0, nEls), num_outcomes, evalTree, dataset_rows, subComm)
_fas(mxToFill, [dest_indices, iFinal], (vals2 - vals) / eps)
calc.from_vector(orig_vec, close=True)
#Now each processor has filled the relevant parts of dpr_cache,
# so gather together:
_mpit.gather_slices(all_slices, owners, mxToFill, [], axes=1, comm=comm)
#REMOVE
# DEBUG LINE USED FOR MONITORING N-QUBIT GST TESTS
#print("DEBUG TIME: dpr_cache(Np=%d, dim=%d, cachesize=%d, treesize=%d, napplies=%d) in %gs" %
# (calc.Np, calc.dim, cacheSize, len(evalTree), evalTree.get_num_applies(), _time.time()-tStart)) #DEBUG
def SV_prs_as_polys(calc, rholabel, elabels, circuit, comm=None, memLimit=None, fastmode=True):
return _prs_as_polys(calc, rholabel, elabels, circuit, comm, memLimit, fastmode)
def SB_prs_as_polys(calc, rholabel, elabels, circuit, comm=None, memLimit=None, fastmode=True):
return _prs_as_polys(calc, rholabel, elabels, circuit, comm, memLimit, fastmode)
#Base case which works for both SV and SB evolution types thanks to Python's duck typing
def _prs_as_polys(calc, rholabel, elabels, circuit, comm=None, memLimit=None, fastmode=True):
"""
Computes polynomials of the probabilities for multiple spam-tuples of `circuit`
Parameters
----------
calc : TermForwardSimulator
The calculator object holding vital information for the computation.
rholabel : Label
Prep label for *all* the probabilities to compute.
elabels : list
List of effect labels, one per probability to compute. The ordering
of `elabels` determines the ordering of the returned probability
polynomials.
circuit : Circuit
The gate sequence to sandwich between the prep and effect labels.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes.
fastmode : bool, optional
A switch between a faster, slightly more memory-hungry mode of
computation (`fastmode=True`) and a simpler, slower one (`fastmode=False`).
Returns
-------
list
A list of PolyRep objects, one per element of `elabels`.
"""
#print("PRS_AS_POLY circuit = ",circuit)
#print("DB: prs_as_polys(",spamTuple,circuit,calc.max_order,")")
#NOTE for FUTURE: to adapt this to work with numerical rather than polynomial coeffs:
# use get_direct_order_terms(order, order_base) w/order_base=0.1(?) instead of get_taylor_order_terms??
# below: replace prps with: prs = _np.zeros(len(elabels),complex) # an array in "bulk" mode
# use *= or * instead of .mult( and .scale(
# e.g. res = _np.product([f.coeff for f in factors])
# res *= (pLeft * pRight)
# - add assert(_np.linalg.norm(_np.imag(prs)) < 1e-6) at end and return _np.real(prs)
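#How the Taylor-order bookkeeping below works (descriptive comment, not part of
# the original module): for each total order, partition_into(order,
# len(circuit)+2) distributes that order among the prep, each gate, and the
# effect (the "SPAM bookends"). E.g. for a one-gate circuit at order 2 the
# partitions include (2,0,0), (1,1,0), (0,2,0), (0,1,1), ... ; p[0] selects
# the prep terms' order, p[1:-1] the gates' orders, and p[-1] the effect
# terms' order, and every combination of terms drawn from those lists is one
# path whose coefficient product and amplitude are accumulated into prps.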
mpv = calc.Np # max_poly_vars
# Construct dict of gate term reps
distinct_gateLabels = sorted(set(circuit))
op_term_reps = {glbl:
[
[t.torep() for t in calc.sos.get_operation(glbl).get_taylor_order_terms(order, mpv)]
for order in range(calc.max_order + 1)
] for glbl in distinct_gateLabels}
#Similar with rho_terms and E_terms, but lists
rho_term_reps = [[t.torep() for t in calc.sos.get_prep(rholabel).get_taylor_order_terms(order, mpv)]
for order in range(calc.max_order + 1)]
E_term_reps = []
E_indices = []
for order in range(calc.max_order + 1):
cur_term_reps = [] # the term reps for *all* the effect vectors
cur_indices = [] # the Evec-index corresponding to each term rep
for i, elbl in enumerate(elabels):
term_reps = [t.torep() for t in calc.sos.get_effect(elbl).get_taylor_order_terms(order, mpv)]
cur_term_reps.extend(term_reps)
cur_indices.extend([i] * len(term_reps))
E_term_reps.append(cur_term_reps)
E_indices.append(cur_indices)
##DEBUG!!!
#print("DB NEW operation terms = ")
#for glbl,order_terms in op_term_reps.items():
# print("GATE ",glbl)
# for i,termlist in enumerate(order_terms):
# print("ORDER %d" % i)
# for term in termlist:
# print("Coeff: ",str(term.coeff))
#HERE DEBUG!!!
global DEBUG_FCOUNT
# db_part_cnt = 0
# db_factor_cnt = 0
#print("DB: pr_as_poly for ",str(tuple(map(str,circuit))), " max_order=",calc.max_order)
prps = [None] * len(elabels) # an array in "bulk" mode? or Polynomial in "symbolic" mode?
for order in range(calc.max_order + 1):
#print("DB: pr_as_poly order=",order)
# db_npartitions = 0
for p in _lt.partition_into(order, len(circuit) + 2): # +2 for SPAM bookends
#factor_lists = [ calc.sos.get_operation(glbl).get_order_terms(pi) for glbl,pi in zip(circuit,p) ]
factor_lists = [rho_term_reps[p[0]]] + \
[op_term_reps[glbl][pi] for glbl, pi in zip(circuit, p[1:-1])] + \
[E_term_reps[p[-1]]]
factor_list_lens = list(map(len, factor_lists))
Einds = E_indices[p[-1]] # specifies which E-vec index each of E_term_reps[p[-1]] corresponds to
if any([len(fl) == 0 for fl in factor_lists]): continue
#print("DB partition = ",p, "listlens = ",[len(fl) for fl in factor_lists])
if fastmode: # filter factor_lists to matrix-compose all length-1 lists
leftSaved = [None] * (len(factor_lists) - 1) # saved[i] is state after i-th
rightSaved = [None] * (len(factor_lists) - 1) # factor has been applied
coeffSaved = [None] * (len(factor_lists) - 1)
last_index = len(factor_lists) - 1
for incd, fi in _lt.incd_product(*[range(l) for l in factor_list_lens]):
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(fi)]
if incd == 0: # need to re-evaluate rho vector
rhoVecL = factors[0].pre_state # Note: `factor` is a rep & so are its ops
for f in factors[0].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[0] = rhoVecL
rhoVecR = factors[0].post_state
for f in factors[0].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[0] = rhoVecR
coeff = factors[0].coeff
coeffSaved[0] = coeff
incd += 1
else:
rhoVecL = leftSaved[incd - 1]
rhoVecR = rightSaved[incd - 1]
coeff = coeffSaved[incd - 1]
# propagate left and right states, saving as we go
for i in range(incd, last_index):
for f in factors[i].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[i] = rhoVecL
for f in factors[i].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[i] = rhoVecR
coeff = coeff.mult(factors[i].coeff)
coeffSaved[i] = coeff
# for the last index, no need to save, and need to construct
# and apply effect vector
#HERE - add something like:
# if factors[-1].opname == cur_effect_opname: (or opint in C-case)
# <skip application of post_ops & preops - just load from (new) saved slot get pLeft & pRight>
for f in factors[-1].pre_ops:
rhoVecL = f.acton(rhoVecL)
E = factors[-1].post_effect # effect representation
pLeft = E.amplitude(rhoVecL)
#Same for post_ops and rhoVecR
for f in factors[-1].post_ops:
rhoVecR = f.acton(rhoVecR)
E = factors[-1].pre_effect
pRight = _np.conjugate(E.amplitude(rhoVecR))
#print("DB PYTHON: final block: pLeft=",pLeft," pRight=",pRight)
res = coeff.mult(factors[-1].coeff)
res.scale((pLeft * pRight))
#print("DB PYTHON: result = ",res)
final_factor_indx = fi[-1]
Ei = Einds[final_factor_indx] # final "factor" index == E-vector index
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei] += res # could add_inplace?
#print("DB PYTHON: prps[%d] = " % Ei, prps[Ei])
else: # non-fast mode
last_index = len(factor_lists) - 1
for fi in _itertools.product(*[range(l) for l in factor_list_lens]):
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(fi)]
res = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pLeft = _unitary_sim_pre(factors, comm, memLimit)
pRight = _unitary_sim_post(factors, comm, memLimit)
# if not self.unitary_evolution else 1.0
res.scale((pLeft * pRight))
final_factor_indx = fi[-1]
Ei = Einds[final_factor_indx] # final "factor" index == E-vector index
# print("DB: pr_as_poly ", fi, " coeffs=", [f.coeff for f in factors],
# " pLeft=", pLeft, " pRight=", pRight, "res=", res)
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei] += res # add_inplace?
#print("DB pr_as_poly running prps[",Ei,"] =",prps[Ei])
# #DEBUG!!!
# db_nfactors = [len(l) for l in factor_lists]
# db_totfactors = _np.product(db_nfactors)
# db_factor_cnt += db_totfactors
# DEBUG_FCOUNT += db_totfactors
# db_part_cnt += 1
# print("DB: pr_as_poly partition=",p,
# "(cnt ",db_part_cnt," with ",db_nfactors," factors (cnt=",db_factor_cnt,")")
#print("DONE -> FCOUNT=",DEBUG_FCOUNT)
return prps # can be a list of polys
def SV_prs_directly(calc, rholabel, elabels, circuit, repcache, comm=None, memLimit=None, fastmode=True, wtTol=0.0,
resetTermWeights=True, debug=None):
#return _prs_directly(calc, rholabel, elabels, circuit, comm, memLimit, fastmode)
raise NotImplementedError("No direct mode yet")
def SB_prs_directly(calc, rholabel, elabels, circuit, repcache, comm=None, memLimit=None, fastmode=True, wtTol=0.0,
resetTermWeights=True, debug=None):
#return _prs_directly(calc, rholabel, elabels, circuit, comm, memLimit, fastmode)
raise NotImplementedError("No direct mode yet")
def SV_refresh_magnitudes_in_repcache(repcache, paramvec):
from ..opcalc import bulk_eval_compact_polys as _bulk_eval_compact_polys
for repcel in repcache.values():
#repcel = <RepCacheEl?>repcel
for termrep in repcel[0]: # first element of tuple contains list of term-reps
v, c = termrep.coeff.compact_complex()
coeff_array = _bulk_eval_compact_polys(v, c, paramvec, (1,), dtype="complex")
termrep.set_magnitude_only(abs(coeff_array[0]))
def SV_find_best_pathmagnitude_threshold(calc, rholabel, elabels, circuit, repcache, opcache, circuitsetup_cache,
comm=None, memLimit=None, pathmagnitude_gap=0.0, min_term_mag=0.01,
max_paths=500, threshold_guess=0.0):
return _find_best_pathmagnitude_threshold(calc, rholabel, elabels, circuit, repcache, opcache, circuitsetup_cache,
comm, memLimit, pathmagnitude_gap, min_term_mag, max_paths,
threshold_guess)
def SB_find_best_pathmagnitude_threshold(calc, rholabel, elabels, circuit, repcache, opcache, circuitsetup_cache,
comm=None, memLimit=None, pathmagnitude_gap=0.0, min_term_mag=0.01,
max_paths=500, threshold_guess=0.0):
return _find_best_pathmagnitude_threshold(calc, rholabel, elabels, circuit, repcache, opcache, circuitsetup_cache,
comm, memLimit, pathmagnitude_gap, min_term_mag, max_paths,
threshold_guess)
def SV_compute_pruned_path_polys_given_threshold(threshold, calc, rholabel, elabels, circuit, repcache, opcache,
circuitsetup_cache, comm=None, memLimit=None, fastmode=True):
return _compute_pruned_path_polys_given_threshold(threshold, calc, rholabel, elabels, circuit, repcache, opcache,
circuitsetup_cache, comm, memLimit, fastmode)
def SB_compute_pruned_path_polys_given_threshold(threshold, calc, rholabel, elabels, circuit, repcache, opcache,
circuitsetup_cache, comm=None, memLimit=None, fastmode=True):
return _compute_pruned_path_polys_given_threshold(threshold, calc, rholabel, elabels, circuit, repcache, opcache,
circuitsetup_cache, comm, memLimit, fastmode)
def SV_circuit_achieved_and_max_sopm(calc, rholabel, elabels, circuit, repcache, opcache, threshold, min_term_mag):
""" TODO: docstring """
mpv = calc.Np # max_poly_vars
distinct_gateLabels = sorted(set(circuit))
op_term_reps = {}
op_foat_indices = {}
for glbl in distinct_gateLabels:
if glbl not in repcache:
hmterms, foat_indices = calc.sos.get_operation(glbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[glbl] = ([t.torep() for t in hmterms], foat_indices)
op_term_reps[glbl], op_foat_indices[glbl] = repcache[glbl]
if rholabel not in repcache:
hmterms, foat_indices = calc.sos.get_prep(rholabel).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[rholabel] = ([t.torep() for t in hmterms], foat_indices)
rho_term_reps, rho_foat_indices = repcache[rholabel]
elabels = tuple(elabels) # so hashable
if elabels not in repcache:
E_term_indices_and_reps = []
for i, elbl in enumerate(elabels):
hmterms, foat_indices = calc.sos.get_effect(elbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
E_term_indices_and_reps.extend(
[(i, t.torep(), t.magnitude, bool(j in foat_indices)) for j, t in enumerate(hmterms)])
#Sort all terms by magnitude
E_term_indices_and_reps.sort(key=lambda x: x[2], reverse=True)
E_term_reps = [x[1] for x in E_term_indices_and_reps]
E_indices = [x[0] for x in E_term_indices_and_reps]
E_foat_indices = [j for j, x in enumerate(E_term_indices_and_reps) if x[3] is True]
repcache[elabels] = (E_term_reps, E_indices, E_foat_indices)
E_term_reps, E_indices, E_foat_indices = repcache[elabels]
factor_lists = [rho_term_reps] + \
[op_term_reps[glbl] for glbl in circuit] + \
[E_term_reps]
foat_indices_per_op = [rho_foat_indices] + [op_foat_indices[glbl] for glbl in circuit] + [E_foat_indices]
ops = [calc.sos.get_prep(rholabel)] + [calc.sos.get_operation(glbl) for glbl in circuit]
max_sum_of_pathmags = _np.product([op.get_total_term_magnitude() for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * calc.sos.get_effect(elbl).get_total_term_magnitude() for elbl in elabels], 'd')
mag = _np.zeros(len(elabels), 'd')
nPaths = _np.zeros(len(elabels), int)
def count_path(b, mg, incd):
mag[E_indices[b[-1]]] += mg
nPaths[E_indices[b[-1]]] += 1
traverse_paths_upto_threshold(factor_lists, threshold, len(elabels),
foat_indices_per_op, count_path) # sets mag and nPaths
return mag, max_sum_of_pathmags
#threshold, npaths, achieved_sum_of_pathmags = pathmagnitude_threshold(
# factor_lists, E_indices, len(elabels), target_sum_of_pathmags, foat_indices_per_op,
# initial_threshold=current_threshold, min_threshold=pathmagnitude_gap / 1000.0, max_npaths=max_paths)
global_cnt = 0
#Base case which works for both SV and SB evolution types thanks to Python's duck typing
def _find_best_pathmagnitude_threshold(calc, rholabel, elabels, circuit, repcache, opcache, circuitsetup_cache, comm,
memLimit, pathmagnitude_gap, min_term_mag, max_paths, threshold_guess):
"""
Computes probabilities for multiple spam-tuples of `circuit`
Parameters
----------
calc : TermForwardSimulator
The calculator object holding vital information for the computation.
rholabel : Label
Prep label for *all* the probabilities to compute.
elabels : list
List of effect labels, one per probability to compute. The ordering
of `elabels` determines the ordering of the returned probability
polynomials.
circuit : Circuit
The gate sequence to sandwich between the prep and effect labels.
repcache : dict, optional
Dictionary used to cache operator representations to speed up future
calls to this function that would use the same set of operations.
opcache : dict, optional
Dictionary used to cache operator objects to speed up future
calls to this function that would use the same set of operations.
circuitsetup_cache : dict, optional
Dictionary used to cache preparation specific to this function, to
speed up repeated calls using the same circuit and set of parameters,
including the same repcache and opcache.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes.
pathmagnitude_gap : float, optional
The amount less than the perfect sum-of-path-magnitudes that
is desired. This sets the target sum-of-path-magnitudes for each
circuit -- the threshold that determines how many paths are added.
min_term_mag : float, optional
A technical parameter to the path pruning algorithm; this value
sets a threshold for how small a term magnitude (one factor in
a path magnitude) must be before it is removed from consideration
entirely (to limit the number of even *potential* paths). Terms
with a magnitude lower than this value are neglected.
max_paths : int, optional
The maximum number of paths allowed per circuit outcome.
threshold_guess : float, optional
In the search for a good pathmagnitude threshold, this value is
used as the starting point. If 0.0 is given, a default value is used.
Returns
-------
npaths : int
the number of paths that were included.
threshold : float
the path-magnitude threshold used.
target_sopm : float
The desired sum-of-path-magnitudes. This is `pathmagnitude_gap`
less than the perfect "all-paths" sum. This sums together the
contributions of different effects.
achieved_sopm : float
The achieved sum-of-path-magnitudes. Ideally this would equal
`target_sopm`. (This also sums together the contributions of
different effects.)
"""
if circuitsetup_cache is None: circuitsetup_cache = {}
if circuit not in circuitsetup_cache:
circuitsetup_cache[circuit] = create_circuitsetup_cacheel(
calc, rholabel, elabels, circuit, repcache, opcache, min_term_mag, calc.Np)
rho_term_reps, op_term_reps, E_term_reps, \
rho_foat_indices, op_foat_indices, E_foat_indices, E_indices = circuitsetup_cache[circuit]
factor_lists = [rho_term_reps] + \
[op_term_reps[glbl] for glbl in circuit] + \
[E_term_reps]
foat_indices_per_op = [rho_foat_indices] + [op_foat_indices[glbl] for glbl in circuit] + [E_foat_indices]
ops = [calc.sos.get_prep(rholabel)] + [calc.sos.get_operation(glbl) for glbl in circuit]
max_sum_of_pathmags = _np.product([op.get_total_term_magnitude() for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * calc.sos.get_effect(elbl).get_total_term_magnitude() for elbl in elabels], 'd')
target_sum_of_pathmags = max_sum_of_pathmags - pathmagnitude_gap # absolute gap
#target_sum_of_pathmags = max_sum_of_pathmags * (1.0 - pathmagnitude_gap) # relative gap
threshold, npaths, achieved_sum_of_pathmags = pathmagnitude_threshold(
factor_lists, E_indices, len(elabels), target_sum_of_pathmags, foat_indices_per_op,
initial_threshold=threshold_guess, min_threshold=pathmagnitude_gap / (3.0 * max_paths), # 3.0 is just heuristic
max_npaths=max_paths)
# above takes an array of target pathmags and gives a single threshold that works for all of them (all E-indices)
# TODO REMOVE
#print("Threshold = ", threshold, " Paths=", npaths)
#REMOVE (and global_cnt definition above)
#global global_cnt
# print("Threshold = ", threshold, " Paths=", npaths, " tgt=", target_sum_of_pathmags,
# "cnt = ", global_cnt) # , " time=%.3fs" % (_time.time()-t0))
#global_cnt += 1
# #DEBUG TODO REMOVE
# print("---------------------------")
# print("Path threshold = ",threshold, " max=",max_sum_of_pathmags,
# " target=",target_sum_of_pathmags, " achieved=",achieved_sum_of_pathmags)
# print("nPaths = ",npaths)
# print("Num high-magnitude (|coeff|>%g, taylor<=%d) terms: %s" \
# % (min_term_mag, calc.max_order, str([len(factors) for factors in factor_lists])))
# print("Num FOAT: ",[len(inds) for inds in foat_indices_per_op])
# print("---------------------------")
target_miss = sum(achieved_sum_of_pathmags) - sum(target_sum_of_pathmags + pathmagnitude_gap)
if target_miss > 1e-5:
print("Warning: Achieved sum(path mags) exceeds max by ", target_miss, "!!!")
return sum(npaths), threshold, sum(target_sum_of_pathmags), sum(achieved_sum_of_pathmags)
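# Hedged usage sketch (the wrapper that calls these helpers is assumed, not
# shown here): a caller would typically first obtain a per-circuit threshold
# from the function above and then build the polynomial representations with
# `_compute_pruned_path_polys_given_threshold`, passing the same
# repcache/opcache/circuitsetup_cache so the term representations are reused:
#   npaths, threshold, target_sopm, achieved_sopm = <threshold-finding call above>
#   prps = _compute_pruned_path_polys_given_threshold(
#       threshold, calc, rholabel, elabels, circuit, repcache, opcache,
#       circuitsetup_cache, comm, memLimit, fastmode)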
def _compute_pruned_path_polys_given_threshold(threshold, calc, rholabel, elabels, circuit, repcache, opcache,
circuitsetup_cache, comm, memLimit, fastmode):
"""
Computes probabilities for multiple spam-tuples of `circuit`
Parameters
----------
calc : TermForwardSimulator
The calculator object holding vital information for the computation.
rholabel : Label
Prep label for *all* the probabilities to compute.
elabels : list
List of effect labels, one per probability to compute. The ordering
of `elabels` determines the ordering of the returned probability
polynomials.
circuit : Circuit
The gate sequence to sandwich between the prep and effect labels.
repcache : dict, optional
Dictionary used to cache operator representations to speed up future
calls to this function that would use the same set of operations.
opcache : dict, optional
Dictionary used to cache operator objects to speed up future
calls to this function that would use the same set of operations.
circuitsetup_cache : dict, optional
Dictionary used to cache preparation specific to this function, to
speed up repeated calls using the same circuit and set of parameters,
including the same repcache and opcache.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes.
fastmode : bool, optional
A switch between a faster, slightly more memory-hungry mode of
computation (`fastmode=True`) and a simpler, slower one (`fastmode=False`).
Returns
-------
prps : list of PolynomialRep objects
the polynomials for the requested circuit probabilities, computed by
selectively summing up high-magnitude paths.
"""
if circuitsetup_cache is None: circuitsetup_cache = {}
if circuit not in circuitsetup_cache:
circuitsetup_cache[circuit] = create_circuitsetup_cacheel(
calc, rholabel, elabels, circuit, repcache, opcache, calc.min_term_mag, calc.Np)
rho_term_reps, op_term_reps, E_term_reps, \
rho_foat_indices, op_foat_indices, E_foat_indices, E_indices = circuitsetup_cache[circuit]
factor_lists = [rho_term_reps] + \
[op_term_reps[glbl] for glbl in circuit] + \
[E_term_reps]
foat_indices_per_op = [rho_foat_indices] + [op_foat_indices[glbl] for glbl in circuit] + [E_foat_indices]
prps = [None] * len(elabels)
last_index = len(factor_lists) - 1
#print("T1 = %.2fs" % (_time.time()-t0)); t0 = _time.time()
#fastmode = False # REMOVE - was used for DEBUG b/c "_ex" path traversal won't always work w/fast mode
if fastmode == 1: # fastmode
leftSaved = [None] * (len(factor_lists) - 1) # saved[i] is state after i-th
rightSaved = [None] * (len(factor_lists) - 1) # factor has been applied
coeffSaved = [None] * (len(factor_lists) - 1)
def add_path(b, mag, incd):
""" Relies on the fact that paths are iterated over in lexographic order, and `incd`
tells us which index was just incremented (all indices less than this one are
the *same* as the last call). """
# "non-fast" mode is the only way we know to do this, since we don't know what path will come next (no
# ability to cache?)
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(b)]
if incd == 0: # need to re-evaluate rho vector
rhoVecL = factors[0].pre_state # Note: `factor` is a rep & so are its ops
for f in factors[0].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[0] = rhoVecL
rhoVecR = factors[0].post_state
for f in factors[0].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[0] = rhoVecR
coeff = factors[0].coeff
coeffSaved[0] = coeff
incd += 1
else:
rhoVecL = leftSaved[incd - 1]
rhoVecR = rightSaved[incd - 1]
coeff = coeffSaved[incd - 1]
# propagate left and right states, saving as we go
for i in range(incd, last_index):
for f in factors[i].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[i] = rhoVecL
for f in factors[i].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[i] = rhoVecR
coeff = coeff.mult(factors[i].coeff)
coeffSaved[i] = coeff
# for the last index, no need to save, and need to construct
# and apply effect vector
for f in factors[-1].pre_ops:
rhoVecL = f.acton(rhoVecL)
E = factors[-1].post_effect # effect representation
pLeft = E.amplitude(rhoVecL)
#Same for post_ops and rhoVecR
for f in factors[-1].post_ops:
rhoVecR = f.acton(rhoVecR)
E = factors[-1].pre_effect
pRight = _np.conjugate(E.amplitude(rhoVecR))
res = coeff.mult(factors[-1].coeff)
res.scale((pLeft * pRight))
final_factor_indx = b[-1]
Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei].add_inplace(res) # prps[Ei] += res
elif fastmode == 2: # achieved-SOPM mode
def add_path(b, mag, incd):
"""Adds in |pathmag| = |prod(factor_coeffs)| for computing achieved SOPM"""
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(b)]
res = _functools.reduce(lambda x, y: x.mult(y), [f.coeff.abs() for f in factors])
final_factor_indx = b[-1]
Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei].add_inplace(res) # prps[Ei] += res
else:
def add_path(b, mag, incd):
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(b)]
res = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pLeft = _unitary_sim_pre(factors, comm, memLimit)
pRight = _unitary_sim_post(factors, comm, memLimit)
res.scale((pLeft * pRight))
final_factor_indx = b[-1]
Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
#print("DB: pr_as_poly factor coeff=",coeff," pLeft=",pLeft," pRight=",pRight, "res=",res)
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei].add_inplace(res) # prps[Ei] += res
#print("DB running prps[",Ei,"] =",prps[Ei])
traverse_paths_upto_threshold(factor_lists, threshold, len(
elabels), foat_indices_per_op, add_path) # sets mag and nPaths
#print("T2 = %.2fs" % (_time.time()-t0)); t0 = _time.time()
#max_degrees = []
#for i,factors in enumerate(factor_lists):
# max_degrees.append(max([f.coeff.degree() for f in factors]))
#print("Max degrees = ",max_degrees)
#for Ei,prp in enumerate(prps):
# print(Ei,":", prp.debug_report())
#if db_paramvec is not None:
# for Ei,prp in enumerate(prps):
# print(Ei," => ", prp.evaluate(db_paramvec))
#TODO: REMOVE - most of this is solved, but keep it around for another few commits in case we want to refer back to
#it. - need to fill in some more details, namely how/where we hold weights and log-weights: in reps? in Term objs?
#maybe consider Cython version? need to consider how to perform "fastmode" in this... maybe need to traverse tree
#in some standard order? what about having multiple thresholds for the different elabels... it seems good to try to
#run these calcs in parallel.
# Note: may only need recusive tree traversal to consider incrementing positions *greater* than or equal to the one
# that was just incremented? (this may enforce some iteration ordering amenable to a fastmode calc)
# Note2: when all effects have *same* op-part of terms, just different effect vector, then maybe we could split the
# effect into an op + effect to better use fastmode calc? Or maybe if ordering is right this isn't necessary?
#Add repcache as in cython version -- saves having to *construct* rep objects all the time... just update
#coefficients when needed instead?
#... and we're done!
#TODO: check that prps are PolynomialReps and not Polynomials -- we may have made this change
# in fastreplib.pyx but forgot it here.
return prps
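# Descriptive note on the three `fastmode` branches above (summary added for
# clarity, not part of the original comments): fastmode == 1 caches the
# partially-propagated left/right states and coefficient products so only the
# factors at or beyond the just-incremented index (`incd`) are re-applied;
# fastmode == 2 only accumulates |prod(factor coefficients)| per effect, i.e.
# the achieved sum-of-path-magnitudes; any other value recomputes each path
# from scratch via _unitary_sim_pre / _unitary_sim_post.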
def create_circuitsetup_cacheel(calc, rholabel, elabels, circuit, repcache, opcache, min_term_mag, mpv):
# Construct dict of gate term reps
mpv = calc.Np # max_poly_vars
distinct_gateLabels = sorted(set(circuit))
op_term_reps = {}
op_foat_indices = {}
for glbl in distinct_gateLabels:
if glbl not in repcache:
hmterms, foat_indices = calc.sos.get_operation(glbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[glbl] = ([t.torep() for t in hmterms], foat_indices)
op_term_reps[glbl], op_foat_indices[glbl] = repcache[glbl]
if rholabel not in repcache:
hmterms, foat_indices = calc.sos.get_prep(rholabel).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[rholabel] = ([t.torep() for t in hmterms], foat_indices)
rho_term_reps, rho_foat_indices = repcache[rholabel]
elabels = tuple(elabels) # so hashable
if elabels not in repcache:
E_term_indices_and_reps = []
for i, elbl in enumerate(elabels):
hmterms, foat_indices = calc.sos.get_effect(elbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
E_term_indices_and_reps.extend(
[(i, t.torep(), t.magnitude, bool(j in foat_indices)) for j, t in enumerate(hmterms)])
#Sort all terms by magnitude
E_term_indices_and_reps.sort(key=lambda x: x[2], reverse=True)
E_term_reps = [x[1] for x in E_term_indices_and_reps]
E_indices = [x[0] for x in E_term_indices_and_reps]
E_foat_indices = [j for j, x in enumerate(E_term_indices_and_reps) if x[3] is True]
repcache[elabels] = (E_term_reps, E_indices, E_foat_indices)
E_term_reps, E_indices, E_foat_indices = repcache[elabels]
return (rho_term_reps, op_term_reps, E_term_reps,
rho_foat_indices, op_foat_indices, E_foat_indices,
E_indices)
#Base case which works for both SV and SB evolution types thanks to Python's duck typing
def _prs_as_pruned_polys(calc, rholabel, elabels, circuit, repcache, opcache, comm=None, memLimit=None, fastmode=True,
pathmagnitude_gap=0.0, min_term_mag=0.01, max_paths=500, current_threshold=None,
compute_polyreps=True):
"""
Computes probabilities for multiple spam-tuples of `circuit`
Parameters
----------
calc : TermForwardSimulator
The calculator object holding vital information for the computation.
rholabel : Label
Prep label for *all* the probabilities to compute.
elabels : list
List of effect labels, one per probability to compute. The ordering
of `elabels` determines the ordering of the returned probability
polynomials.
circuit : Circuit
The gate sequence to sandwich between the prep and effect labels.
repcache : dict, optional
Dictionary used to cache operator representations to speed up future
calls to this function that would use the same set of operations.
opcache : dict, optional
Dictionary used to cache operator objects to speed up future
calls to this function that would use the same set of operations.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes.
fastmode : bool, optional
A switch between a faster, slightly more memory-hungry mode of
computation (`fastmode=True`) and a simpler, slower one (`fastmode=False`).
pathmagnitude_gap : float, optional
The amount by which the target sum-of-path-magnitudes is allowed to
fall short of the perfect ("all-paths") sum. This sets the target
sum-of-path-magnitudes for each circuit, which in turn determines the
path-magnitude threshold and hence how many paths are added.
min_term_mag : float, optional
A technical parameter to the path pruning algorithm; this value
sets a threshold for how small a term magnitude (one factor in
a path magnitude) must be before it is removed from consideration
entirely (to limit the number of even *potential* paths). Terms
with a magnitude lower than this value are neglected.
current_threshold : float, optional
If the threshold needed to achieve the desired `pathmagnitude_gap`
is greater than this value (i.e. if using current_threshold would
result in *more* paths being computed) then this function will not
compute any paths and exit early, returning `None` in place of the
usual list of polynomial representations.
compute_polyreps : bool, optional
Whether to compute the polynomial representations. If False, only the
achieved sum-of-path-magnitudes is computed and an empty list is
returned in place of `prps`.
Returns
-------
prps : list of PolynomialRep objects
the polynomials for the requested circuit probabilities, computed by
selectively summing up high-magnitude paths.
npaths : int
the number of paths that were included.
threshold : float
the path-magnitude threshold used.
target_sopm : float
The desired sum-of-path-magnitudes. This is `pathmagnitude_gap`
less than the perfect "all-paths" sum. This sums together the
contributions of different effects.
achieved_sopm : float
The achieved sum-of-path-magnitudes. Ideally this would equal
`target_sopm`. (This also sums together the contributions of
different effects.)
"""
#t0 = _time.time()
# Construct dict of gate term reps
mpv = calc.Np # max_poly_vars
distinct_gateLabels = sorted(set(circuit))
op_term_reps = {}
op_foat_indices = {}
for glbl in distinct_gateLabels:
if glbl not in repcache:
hmterms, foat_indices = calc.sos.get_operation(glbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[glbl] = ([t.torep() for t in hmterms], foat_indices)
op_term_reps[glbl], op_foat_indices[glbl] = repcache[glbl]
if rholabel not in repcache:
hmterms, foat_indices = calc.sos.get_prep(rholabel).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
repcache[rholabel] = ([t.torep() for t in hmterms], foat_indices)
rho_term_reps, rho_foat_indices = repcache[rholabel]
elabels = tuple(elabels) # so hashable
if elabels not in repcache:
E_term_indices_and_reps = []
for i, elbl in enumerate(elabels):
hmterms, foat_indices = calc.sos.get_effect(elbl).get_highmagnitude_terms(
min_term_mag, max_taylor_order=calc.max_order, max_poly_vars=mpv)
E_term_indices_and_reps.extend(
[(i, t.torep(), t.magnitude, bool(j in foat_indices)) for j, t in enumerate(hmterms)])
#Sort all terms by magnitude
E_term_indices_and_reps.sort(key=lambda x: x[2], reverse=True)
E_term_reps = [x[1] for x in E_term_indices_and_reps]
E_indices = [x[0] for x in E_term_indices_and_reps]
E_foat_indices = [j for j, x in enumerate(E_term_indices_and_reps) if x[3] is True]
repcache[elabels] = (E_term_reps, E_indices, E_foat_indices)
E_term_reps, E_indices, E_foat_indices = repcache[elabels]
prps = [None] * len(elabels)
factor_lists = [rho_term_reps] + \
[op_term_reps[glbl] for glbl in circuit] + \
[E_term_reps]
last_index = len(factor_lists) - 1
foat_indices_per_op = [rho_foat_indices] + [op_foat_indices[glbl] for glbl in circuit] + [E_foat_indices]
ops = [calc.sos.get_prep(rholabel)] + [calc.sos.get_operation(glbl) for glbl in circuit]
max_sum_of_pathmags = _np.product([op.get_total_term_magnitude() for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * calc.sos.get_effect(elbl).get_total_term_magnitude() for elbl in elabels], 'd')
target_sum_of_pathmags = max_sum_of_pathmags - pathmagnitude_gap # absolute gap
#target_sum_of_pathmags = max_sum_of_pathmags * (1.0 - pathmagnitude_gap) # relative gap
threshold, npaths, achieved_sum_of_pathmags = pathmagnitude_threshold(
factor_lists, E_indices, len(elabels), target_sum_of_pathmags, foat_indices_per_op,
initial_threshold=current_threshold,
min_threshold=pathmagnitude_gap / (3.0 * max_paths), # 3.0 is just heuristic
max_npaths=max_paths)
# above takes an array of target pathmags and gives a single threshold that works for all of them (all E-indices)
#print("Threshold = ", threshold, " Paths=", npaths)
#REMOVE (and global_cnt definition above)
#global global_cnt
# print("Threshold = ", threshold, " Paths=", npaths, " tgt=", target_sum_of_pathmags,
# "cnt = ", global_cnt) # , " time=%.3fs" % (_time.time()-t0))
#global_cnt += 1
# no polyreps needed, e.g. just keep existing (cached) polys
if not compute_polyreps or (current_threshold is not None and current_threshold >= 0 and threshold >= current_threshold):
return [], sum(npaths), threshold, sum(target_sum_of_pathmags), sum(achieved_sum_of_pathmags)
#print("T1 = %.2fs" % (_time.time()-t0)); t0 = _time.time()
#fastmode = False # REMOVE - was used for DEBUG b/c "_ex" path traversal won't always work w/fast mode
if fastmode:
leftSaved = [None] * (len(factor_lists) - 1) # saved[i] is state after i-th
rightSaved = [None] * (len(factor_lists) - 1) # factor has been applied
coeffSaved = [None] * (len(factor_lists) - 1)
def add_path(b, mag, incd):
""" Relies on the fact that paths are iterated over in lexographic order, and `incd`
tells us which index was just incremented (all indices less than this one are
the *same* as the last call). """
# "non-fast" mode is the only way we know to do this, since we don't know what path will come next (no
# ability to cache?)
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(b)]
if incd == 0: # need to re-evaluate rho vector
rhoVecL = factors[0].pre_state # Note: `factor` is a rep & so are its ops
for f in factors[0].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[0] = rhoVecL
rhoVecR = factors[0].post_state
for f in factors[0].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[0] = rhoVecR
coeff = factors[0].coeff
coeffSaved[0] = coeff
incd += 1
else:
rhoVecL = leftSaved[incd - 1]
rhoVecR = rightSaved[incd - 1]
coeff = coeffSaved[incd - 1]
# propagate left and right states, saving as we go
for i in range(incd, last_index):
for f in factors[i].pre_ops:
rhoVecL = f.acton(rhoVecL)
leftSaved[i] = rhoVecL
for f in factors[i].post_ops:
rhoVecR = f.acton(rhoVecR)
rightSaved[i] = rhoVecR
coeff = coeff.mult(factors[i].coeff)
coeffSaved[i] = coeff
# for the last index, no need to save, and need to construct
# and apply effect vector
for f in factors[-1].pre_ops:
rhoVecL = f.acton(rhoVecL)
E = factors[-1].post_effect # effect representation
pLeft = E.amplitude(rhoVecL)
#Same for post_ops and rhoVecR
for f in factors[-1].post_ops:
rhoVecR = f.acton(rhoVecR)
E = factors[-1].pre_effect
pRight = _np.conjugate(E.amplitude(rhoVecR))
res = coeff.mult(factors[-1].coeff)
res.scale((pLeft * pRight))
final_factor_indx = b[-1]
Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei].add_inplace(res) # prps[Ei] += res
else:
def add_path(b, mag, incd):
factors = [factor_lists[i][factorInd] for i, factorInd in enumerate(b)]
res = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pLeft = _unitary_sim_pre(factors, comm, memLimit)
pRight = _unitary_sim_post(factors, comm, memLimit)
res.scale((pLeft * pRight))
final_factor_indx = b[-1]
Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
#print("DB: pr_as_poly factor coeff=",coeff," pLeft=",pLeft," pRight=",pRight, "res=",res)
if prps[Ei] is None: prps[Ei] = res
else: prps[Ei].add_inplace(res) # prps[Ei] += res
#print("DB running prps[",Ei,"] =",prps[Ei])
traverse_paths_upto_threshold(factor_lists, threshold, len(
elabels), foat_indices_per_op, add_path) # sets mag and nPaths
#print("T2 = %.2fs" % (_time.time()-t0)); t0 = _time.time()
# #DEBUG
# print("---------------------------")
# print("Path threshold = ",threshold, " max=",max_sum_of_pathmags,
# " target=",target_sum_of_pathmags, " achieved=",achieved_sum_of_pathmags)
# print("nPaths = ",npaths)
# print("Num high-magnitude (|coeff|>%g, taylor<=%d) terms: %s" \
# % (min_term_mag, calc.max_order, str([len(factors) for factors in factor_lists])))
# print("Num FOAT: ",[len(inds) for inds in foat_indices_per_op])
# print("---------------------------")
#max_degrees = []
#for i,factors in enumerate(factor_lists):
# max_degrees.append(max([f.coeff.degree() for f in factors]))
#print("Max degrees = ",max_degrees)
#for Ei,prp in enumerate(prps):
# print(Ei,":", prp.debug_report())
#if db_paramvec is not None:
# for Ei,prp in enumerate(prps):
# print(Ei," => ", prp.evaluate(db_paramvec))
#TODO: REMOVE - most of this is solved, but keep it around for another few commits in case we want to refer back to
#it. - need to fill in some more details, namely how/where we hold weights and log-weights: in reps? in Term objs?
#maybe consider Cython version? need to consider how to perform "fastmode" in this... maybe need to traverse tree
#in some standard order? what about having multiple thresholds for the different elabels... it seems good to try to
#run these calcs in parallel.
# Note: may only need recusive tree traversal to consider incrementing positions *greater* than or equal to the one
# that was just incremented? (this may enforce some iteration ordering amenable to a fastmode calc)
# Note2: when all effects have *same* op-part of terms, just different effect vector, then maybe we could split the
# effect into an op + effect to better use fastmode calc? Or maybe if ordering is right this isn't necessary?
#Add repcache as in cython version -- saves having to *construct* rep objects all the time... just update
#coefficients when needed instead?
#... and we're done!
target_miss = sum(achieved_sum_of_pathmags) - sum(target_sum_of_pathmags + pathmagnitude_gap)
if target_miss > 1e-5:
print("Warning: Achieved sum(path mags) exceeds max by ", target_miss, "!!!")
#TODO: check that prps are PolynomialReps and not Polynomials -- we may have made this change
# in fastreplib.pyx but forgot it here.
return prps, sum(npaths), threshold, sum(target_sum_of_pathmags), sum(achieved_sum_of_pathmags)
# foat = first-order always-traversed
def traverse_paths_upto_threshold(oprep_lists, pathmag_threshold, num_elabels, foat_indices_per_op,
fn_visitpath, debug=False):
"""
Traverse all the paths up to some path-magnitude threshold, calling
`fn_visitpath` for each one.
Parameters
----------
oprep_lists : list of lists
representations for the terms of each layer of the circuit whose
outcome probability we're computing, including prep and POVM layers.
`oprep_lists[i]` is a list of the terms available to choose from
for the i-th circuit layer, ordered by increasing term-magnitude.
pathmag_threshold : float
the path-magnitude threshold to use.
num_elabels : int
The number of effect labels whose terms are all amassed in the
final `oprep_lists[-1]` list (knowing which elements of
`oprep_lists[-1]` correspond to which effect isn't necessary for
this function).
foat_indices_per_op : list
A list of lists of integers, such that `foat_indices_per_op[i]`
is a list of indices into `oprep_lists[-1]` that marks out which
terms are first-order (Taylor) terms that should therefore always
be traversed regardless of their term-magnitude (foat = first-order-
always-traverse).
fn_visitpath : function
A function called for each path that is traversed. Arguments
are `(term_indices, magnitude, incd)` where `term_indices` is
an array of integers giving the index into each `oprep_lists[i]`
list, `magnitude` is the path magnitude, and `incd` is the index
of the circuit layer that was just incremented (all elements of
`term_indices` less than this index are guaranteed to be the same
as they were in the last call to `fn_visitpath`, and this can be
used for faster path evaluation). If `fn_visitpath` returns True,
the traversal terminates immediately; callers use this to enforce
a maximum number of paths.
debug : bool, optional
Whether to print additional debug info.
Returns
-------
None
""" # zot = zero-order-terms
n = len(oprep_lists)
nops = [len(oprep_list) for oprep_list in oprep_lists]
b = [0] * n # root
log_thres = _math.log10(pathmag_threshold)
##TODO REMOVE
#if debug:
# if debug > 1: print("BEGIN TRAVERSAL")
# accepted_bs_and_mags = {}
def traverse_tree(root, incd, log_thres, current_mag, current_logmag, order, current_nzeros):
""" first_order means only one b[i] is incremented, e.g. b == [0 1 0] or [4 0 0] """
b = root
#print("BEGIN: ",root)
for i in reversed(range(incd, n)):
if b[i] + 1 == nops[i]: continue
b[i] += 1
if order == 0: # then incd doesn't matter b/c can inc anything to become 1st order
sub_order = 1 if (i != n - 1 or b[i] >= num_elabels) else 0
elif order == 1:
# we started with a first order term where incd was incremented, and now
# we're incrementing something else
sub_order = 1 if i == incd else 2 # signifies anything over 1st order where >1 column has been inc'd
else:
sub_order = order
logmag = current_logmag + (oprep_lists[i][b[i]].logmagnitude - oprep_lists[i][b[i] - 1].logmagnitude)
#print("Trying: ",b)
if logmag >= log_thres: # or sub_order == 0:
numerator = oprep_lists[i][b[i]].magnitude
denom = oprep_lists[i][b[i] - 1].magnitude
nzeros = current_nzeros
if denom == 0:
denom = SMALL; nzeros -= 1
if numerator == 0:
numerator = SMALL; nzeros += 1
mag = current_mag * (numerator / denom)
actual_mag = mag if (nzeros == 0) else 0.0 # magnitude is actually zero if nzeros > 0
if fn_visitpath(b, actual_mag, i): return True # fn_visitpath can signal early return
if traverse_tree(b, i, log_thres, mag, logmag, sub_order, nzeros):
# add any allowed paths beneath this one
return True
elif sub_order <= 1:
#We've rejected term-index b[i] (in column i) because it's too small - the only reason
# to accept b[i] or term indices higher than it is to include "foat" terms, so we now
# iterate through any remaining foat indices for this column (we've accepted all lower
# values of b[i], or we wouldn't be here). Note that we just need to visit the path,
# we don't need to traverse down, since we know the path magnitude is already too low.
orig_bi = b[i]
for j in foat_indices_per_op[i]:
if j >= orig_bi:
b[i] = j
nzeros = current_nzeros
numerator = oprep_lists[i][b[i]].magnitude
denom = oprep_lists[i][orig_bi - 1].magnitude
if denom == 0: denom = SMALL
#if numerator == 0: nzeros += 1 # not needed b/c we just leave numerator = 0
# OK if mag == 0 as it's not passed to any recursive calls
mag = current_mag * (numerator / denom)
actual_mag = mag if (nzeros == 0) else 0.0 # magnitude is actually zero if nzeros > 0
if fn_visitpath(b, actual_mag, i): return True
if i != n - 1:
# if we're not incrementing (from a zero-order term) the final index, then we
# need to increment it until we hit num_elabels (*all* zero-th order paths)
orig_bn = b[n - 1]
for k in range(1, num_elabels):
b[n - 1] = k
numerator = oprep_lists[n - 1][b[n - 1]].magnitude
denom = oprep_lists[i][orig_bn].magnitude
if denom == 0: denom = SMALL
# zero if either numerator == 0 or mag == 0 from above.
mag2 = mag * (numerator / denom)
if fn_visitpath(b, mag2 if (nzeros == 0) else 0.0, n - 1): return True
b[n - 1] = orig_bn
b[i] = orig_bi
b[i] -= 1 # so we don't have to copy b
#print("END: ",root)
return False # return value == "do we need to terminate traversal immediately?"
current_mag = 1.0; current_logmag = 0.0
fn_visitpath(b, current_mag, 0) # visit root (all 0s) path
traverse_tree(b, 0, log_thres, current_mag, current_logmag, 0, 0)
return
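# Illustrative sketch (hypothetical caller): `fn_visitpath` can simply count
# the surviving paths; returning False keeps the traversal going, while
# returning True (as `count_path` below does when it hits `max_npaths`) stops
# it immediately.
#   n_visited = [0]
#   def _count(term_indices, magnitude, incd):
#       n_visited[0] += 1
#       return False
#   traverse_paths_upto_threshold(factor_lists, 0.01, num_elabels,
#                                 foat_indices_per_op, _count)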
# TODO REMOVE: method to traverse paths until the result converges, but I don't think this is well justified
# def traverse_paths_upto_threshold_ex(oprep_lists, high_threshold, low_threshold, num_elabels, foat_indices_per_op,
# fn_visitpath, debug=False):
# """
# TODO: docstring
# """
# # zot = zero-order-terms
# n = len(oprep_lists)
# nops = [len(oprep_list) for oprep_list in oprep_lists]
# b = [0] * n # root
# log_thres_high = _np.log10(high_threshold) # a previous threshold: we've already visited everything above this
# log_thres_low = _np.log10(low_threshold) # visit everything above this threshold
#
# ##TODO REMOVE
# #if debug:
# # if debug > 1: print("BEGIN TRAVERSAL")
# # accepted_bs_and_mags = {}
#
# def traverse_tree(root, incd, log_thres_high, log_thres_low, current_mag, current_logmag, order):
# """ first_order means only one b[i] is incremented, e.g. b == [0 1 0] or [4 0 0] """
# b = root
# #print("BEGIN: ",root)
# for i in reversed(range(incd, n)):
# if b[i] + 1 == nops[i]: continue
# b[i] += 1
#
# if order == 0: # then incd doesn't matter b/c can inc anything to become 1st order
# sub_order = 1 if (i != n - 1 or b[i] >= num_elabels) else 0
# elif order == 1:
# # we started with a first order term where incd was incremented, and now
# # we're incrementing something else
# sub_order = 1 if i == incd else 2 # signifies anything over 1st order where >1 column has be inc'd
# else:
# sub_order = order
#
# logmag = current_logmag + (oprep_lists[i][b[i]].logmagnitude - oprep_lists[i][b[i] - 1].logmagnitude)
# #print("Trying: ",b)
# if logmag >= log_thres_low: # or sub_order == 0:
# if oprep_lists[i][b[i] - 1].magnitude == 0:
# mag = 0
# else:
# mag = current_mag * (oprep_lists[i][b[i]].magnitude / oprep_lists[i][b[i] - 1].magnitude)
#
# if logmag > log_thres_high:
# if fn_visitpath(b, mag, i): return True # fn_visitpath can signal early return
# if traverse_tree(b, i, log_thres_high, log_thres_low, mag, logmag, sub_order):
# # add any allowed paths beneath this one
# return True
# elif sub_order <= 1 and high_threshold >= 1.0:
# #We've rejected term-index b[i] (in column i) because it's too small - the only reason
# # to accept b[i] or term indices higher than it is to include "foat" terms, so we now
# # iterate through any remaining foat indices for this column (we've accepted all lower
# # values of b[i], or we wouldn't be here). Note that we just need to visit the path,
# # we don't need to traverse down, since we know the path magnitude is already too low.
# orig_bi = b[i]
# for j in foat_indices_per_op[i]:
# if j >= orig_bi:
# b[i] = j
# mag = 0 if oprep_lists[i][orig_bi - 1].magnitude == 0 else \
# current_mag * (oprep_lists[i][b[i]].magnitude / oprep_lists[i][orig_bi - 1].magnitude)
#
# if fn_visitpath(b, mag, i): return True
#
# if i != n - 1:
# # if we're not incrementing (from a zero-order term) the final index, then we
# # need to to increment it until we hit num_elabels (*all* zero-th order paths)
# orig_bn = b[n - 1]
# for k in range(1, num_elabels):
# b[n - 1] = k
# mag2 = mag * (oprep_lists[n - 1][b[n - 1]].magnitude
# / oprep_lists[i][orig_bn].magnitude)
# if fn_visitpath(b, mag2, n - 1): return True
#
# b[n - 1] = orig_bn
#
# b[i] = orig_bi
#
# b[i] -= 1 # so we don't have to copy b
# #print("END: ",root)
# return False # return value == "do we need to terminate traversal immediately?"
#
# current_mag = 1.0; current_logmag = 0.0
# fn_visitpath(b, current_mag, 0) # visit root (all 0s) path
# return traverse_tree(b, 0, log_thres_high, log_thres_low, current_mag, current_logmag, 0)
# #returns whether fn_visitpath caused us to exit
def pathmagnitude_threshold(oprep_lists, E_indices, num_elabels, target_sum_of_pathmags,
foat_indices_per_op=None, initial_threshold=0.1,
min_threshold=1e-10, max_npaths=1000000):
"""
Find the pathmagnitude-threshold needed to achieve some target sum-of-path-magnitudes:
so that the sum of all the path-magnitudes greater than this threshold achieve the
target (or get as close as we can).
Parameters
----------
oprep_lists : list of lists
representations for the terms of each layer of the circuit whose
outcome probability we're computing, including prep and POVM layers.
`oprep_lists[i]` is a list of the terms available to choose from
for the i-th circuit layer, ordered by increasing term-magnitude.
E_indices : numpy array
The effect-vector index for each element of `oprep_lists[-1]`
(representations for *all* effect vectors exist all together
in `oprep_lists[-1]`).
num_elabels : int
The total number of different effects whose reps appear in
`oprep_lists[-1]` (also one more than the largest index in
`E_indices`).
target_sum_of_pathmags : array
An array of floats of length `num_elabels` giving the target sum of path
magnitudes desired for each effect (separately).
foat_indices_per_op : list
A list of lists of integers, such that `foat_indices_per_op[i]`
is a list of indices into `oprep_lists[-1]` that marks out which
terms are first-order (Taylor) terms that should therefore always
be traversed regardless of their term-magnitude (foat = first-order-
always-traverse).
initial_threshold : float
The starting pathmagnitude threshold to try (this function uses
an iterative procedure to find a threshold).
min_threshold : float
The smallest threshold allowed. If this amount is reached, it
is just returned and searching stops.
max_npaths : int, optional
The maximum number of paths allowed per effect.
Returns
-------
threshold : float
The obtained pathmagnitude threshold.
npaths : numpy array
An array of length `num_elabels` giving the number of paths selected
for each of the effect vectors.
achieved_sopm : numpy array
An array of length `num_elabels` giving the achieved sum-of-path-
magnitudes for each of the effect vectors.
"""
nIters = 0
threshold = initial_threshold if (initial_threshold is not None and initial_threshold >= 0) else 0.1 # default value
target_mag = target_sum_of_pathmags
#print("Target magnitude: ",target_mag)
threshold_upper_bound = 1.0
threshold_lower_bound = None
#db_last_threshold = None #DEBUG TODO REMOVE
#mag = 0; nPaths = 0
if foat_indices_per_op is None:
foat_indices_per_op = [()] * len(oprep_lists)
# REMOVE comm = memLimit = None # TODO: make these arguments later?
def count_path(b, mg, incd):
mag[E_indices[b[-1]]] += mg
nPaths[E_indices[b[-1]]] += 1
# REMOVE?
# #Instead of magnitude, accumulate actual current path contribution that we can test for convergence
# factors = [oprep_lists[i][factorInd] for i, factorInd in enumerate(b)]
# res = _np.product([f.evaluated_coeff for f in factors])
# pLeft = _unitary_sim_pre(factors, comm, memLimit)
# pRight = _unitary_sim_post(factors, comm, memLimit)
# res *= (pLeft * pRight)
#
# final_factor_indx = b[-1]
# Ei = E_indices[final_factor_indx] # final "factor" index == E-vector index
# integrals[Ei] += res
return (nPaths[E_indices[b[-1]]] == max_npaths) # trigger immediate return if hit max_npaths
while nIters < 100: # TODO: allow setting max_nIters as an arg?
mag = _np.zeros(num_elabels, 'd')
nPaths = _np.zeros(num_elabels, int)
traverse_paths_upto_threshold(oprep_lists, threshold, num_elabels,
foat_indices_per_op, count_path) # sets mag and nPaths
assert(max_npaths == 0 or _np.all(nPaths <= max_npaths)), "MAX PATHS EXCEEDED! (%s)" % nPaths
if _np.all(mag >= target_mag) or _np.any(nPaths >= max_npaths): # try larger threshold
threshold_lower_bound = threshold
if threshold_upper_bound is not None:
threshold = (threshold_upper_bound + threshold_lower_bound) / 2
else: threshold *= 2
else: # try smaller threshold
threshold_upper_bound = threshold
if threshold_lower_bound is not None:
threshold = (threshold_upper_bound + threshold_lower_bound) / 2
else: threshold /= 2
if threshold_upper_bound is not None and threshold_lower_bound is not None and \
(threshold_upper_bound - threshold_lower_bound) / threshold_upper_bound < 1e-3:
#print("Converged after %d iters!" % nIters)
break
if threshold_upper_bound < min_threshold: # could also just set min_threshold to be the lower bound initially?
threshold_upper_bound = threshold_lower_bound = min_threshold
break
nIters += 1
#Run path traversal once more to count final number of paths
def count_path_nomax(b, mg, incd):
# never returns True - we want to check *threshold* alone selects correct # of paths
mag[E_indices[b[-1]]] += mg
nPaths[E_indices[b[-1]]] += 1
mag = _np.zeros(num_elabels, 'd')
# integrals = _np.zeros(num_elabels, 'd') REMOVE
nPaths = _np.zeros(num_elabels, int)
traverse_paths_upto_threshold(oprep_lists, threshold_lower_bound, num_elabels,
foat_indices_per_op, count_path_nomax) # sets mag and nPaths
#TODO REMOVE - idea of truncating based on convergence of sum seems flawed - can't detect long tails
# last_threshold = 1e10 # something huge
# threshold = initial_threshold # needs to be < 1
# converged = False
#
# while not converged:
# converged = traverse_paths_upto_threshold_ex(oprep_lists, last_threshold, threshold,
# num_elabels, foat_indices_per_op, count_path)
# last_threshold = threshold
# threshold /= 2
return threshold_lower_bound, nPaths, mag
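# Descriptive note (summary of the loop above, added for clarity): the
# threshold is found by bisection -- the upper/lower bounds are tightened until
# their relative width drops below 1e-3, the iteration cap of 100 is hit, or
# the bounds fall below `min_threshold` -- and the final traversal is re-run at
# the *lower* bound so that the returned nPaths and mag correspond exactly to
# the returned threshold.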
def _unitary_sim_pre(complete_factors, comm, memLimit):
rhoVec = complete_factors[0].pre_state # a prep representation
for f in complete_factors[0].pre_ops:
rhoVec = f.acton(rhoVec)
for f in _itertools.chain(*[f.pre_ops for f in complete_factors[1:-1]]):
rhoVec = f.acton(rhoVec) # LEXICOGRAPHICAL VS MATRIX ORDER
for f in complete_factors[-1].pre_ops:
rhoVec = f.acton(rhoVec)
EVec = complete_factors[-1].post_effect
return EVec.amplitude(rhoVec)
def _unitary_sim_post(complete_factors, comm, memLimit):
rhoVec = complete_factors[0].post_state # a prep representation
for f in complete_factors[0].post_ops:
rhoVec = f.acton(rhoVec)
for f in _itertools.chain(*[f.post_ops for f in complete_factors[1:-1]]):
rhoVec = f.acton(rhoVec) # LEXICOGRAPHICAL VS MATRIX ORDER
for f in complete_factors[-1].post_ops:
rhoVec = f.acton(rhoVec)
EVec = complete_factors[-1].pre_effect
return _np.conjugate(EVec.amplitude(rhoVec)) # conjugate for same reason as above
|
#-----------------------------------------------------------------------------
# Title : PyRogue base module - Model Class
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import rogue.interfaces.memory as rim
def wordCount(bits, wordSize):
ret = bits // wordSize
if (bits % wordSize != 0 or bits == 0):
ret += 1
return ret
def byteCount(bits):
return wordCount(bits, 8)
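# Worked examples (not part of the original source): wordCount rounds up to
# whole words; a bit count of 0 still counts as one word.
#   wordCount(12, 8)  # -> 2  (12 bits need two 8-bit words)
#   byteCount(16)     # -> 2
#   byteCount(17)     # -> 3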
def reverseBits(value, bitSize):
result = 0
for i in range(bitSize):
result <<= 1
result |= value & 1
value >>= 1
return result
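# Worked examples (not part of the original source): the lowest bit of `value`
# becomes the highest bit of the result within the given bit width.
#   reverseBits(0b0001, 4)  # -> 0b1000 (8)
#   reverseBits(0b0110, 4)  # -> 0b0110 (6, palindromic pattern)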
def twosComplement(value, bitSize):
"""compute the 2's complement of int value"""
if (value & (1 << (bitSize - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
value = value - (1 << bitSize) # compute negative value
return value # return positive value as is
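# Worked examples (not part of the original source):
#   twosComplement(0xFF, 8)  # -> -1  (sign bit set, so 255 - 256)
#   twosComplement(0x7F, 8)  # -> 127 (sign bit clear, returned unchanged)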
class ModelMeta(type):
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
cls.subclasses = {}
def __call__(cls, *args, **kwargs):
key = cls.__name__ + str(args) + str(kwargs)
if key not in cls.subclasses:
#print(f'Key: {key}')
inst = super().__call__(*args, **kwargs)
cls.subclasses[key] = inst
return cls.subclasses[key]
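# Descriptive note (summary of ModelMeta, added for clarity): the metaclass
# memoizes constructor calls per subclass, keyed on the class name plus the
# stringified arguments, so repeated calls with the same arguments return the
# same instance, e.g. UInt(32) is UInt(32) -> True.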
class Model(object, metaclass=ModelMeta):
fstring = None
encoding = None
pytype = None
defaultdisp = '{}'
signed = False
endianness = 'little'
bitReverse = False
modelId = rim.PyFunc
def __init__(self, bitSize, binPoint=0):
self.binPoint = binPoint
self.bitSize = bitSize
self.name = self.__class__.__name__
@property
def isBigEndian(self):
return self.endianness == 'big'
def minValue(self):
return None
def maxValue(self):
return None
class UInt(Model):
pytype = int
defaultdisp = '{:#x}'
modelId = rim.UInt
def __init__(self, bitSize):
super().__init__(bitSize)
self.name = f'{self.__class__.__name__}{self.bitSize}'
# Called by raw read/write and when bitsize > 64
def toBytes(self, value):
return value.to_bytes(byteCount(self.bitSize), self.endianness, signed=self.signed)
# Called by raw read/write and when bitsize > 64
def fromBytes(self, ba):
return int.from_bytes(ba, self.endianness, signed=self.signed)
def fromString(self, string):
return int(string, 0)
def minValue(self):
return 0
def maxValue(self):
return (2**self.bitSize)-1
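# Hedged round-trip example (illustrative only): a 16-bit unsigned value is
# serialized little-endian by default.
#   u = UInt(16)
#   u.toBytes(0x1234)            # -> b'\x34\x12'
#   u.fromBytes(b'\x34\x12')     # -> 0x1234
#   u.minValue(), u.maxValue()   # -> (0, 65535)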
class UIntReversed(UInt):
"""Converts Unsigned Integer to and from bytearray with reserved bit ordering"""
modelId = rim.PyFunc # Not yet supported
bitReverse = True
def toBytes(self, value):
valueReverse = reverseBits(value, self.bitSize)
return valueReverse.to_bytes(byteCount(self.bitSize), self.endianness, signed=self.signed)
def fromBytes(self, ba):
valueReverse = int.from_bytes(ba, self.endianness, signed=self.signed)
return reverseBits(valueReverse, self.bitSize)
class Int(UInt):
# Override these and inherit everything else from UInt
defaultdisp = '{:d}'
signed = True
modelId = rim.Int
# Called by raw read/write and when bitsize > 64
def toBytes(self, value):
if (value < 0) and (self.bitSize < (byteCount(self.bitSize) * 8)):
newValue = value & (2**(self.bitSize)-1) # Strip upper bits
ba = newValue.to_bytes(byteCount(self.bitSize), self.endianness, signed=False)
else:
ba = value.to_bytes(byteCount(self.bitSize), self.endianness, signed=True)
return ba
# Called by raw read/write and when bitsize > 64
def fromBytes(self,ba):
if (self.bitSize < (byteCount(self.bitSize)*8)):
value = int.from_bytes(ba, self.endianness, signed=False)
if value >= 2**(self.bitSize-1):
value -= 2**self.bitSize
else:
value = int.from_bytes(ba, self.endianness, signed=True)
return value
def fromString(self, string):
i = int(string, 0)
# perform twos complement if necessary
if i > 0 and ((i >> (self.bitSize - 1)) & 0x1 == 1): # check the sign bit of a bitSize-wide value
i = i - (1 << self.bitSize)
return i
def minValue(self):
return -1 * ((2**(self.bitSize-1))-1)
def maxValue(self):
return (2**(self.bitSize-1))-1
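# Hedged example (illustrative only): a 12-bit signed value narrower than its
# byte-aligned container round-trips through the unsigned masking path.
#   s = Int(12)
#   s.toBytes(-1)                # -> b'\xff\x0f' (value masked to 12 bits)
#   s.fromBytes(b'\xff\x0f')     # -> -1
#   s.minValue(), s.maxValue()   # -> (-2047, 2047)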
class UIntBE(UInt):
endianness = 'big'
class IntBE(Int):
endianness = 'big'
class Bool(Model):
pytype = bool
defaultdisp = {False: 'False', True: 'True'}
modelId = rim.Bool
def __init__(self, bitSize):
assert bitSize == 1, f"The bitSize param of Model {self.__class__.__name__} must be 1"
super().__init__(bitSize)
def fromString(self, string):
return str.lower(string) == "true"
def minValue(self):
return 0
def maxValue(self):
return 1
class String(Model):
encoding = 'utf-8'
defaultdisp = '{}'
pytype = str
modelId = rim.String
def __init__(self, bitSize):
super().__init__(bitSize)
self.name = f'{self.__class__.__name__}({self.bitSize//8})'
def fromString(self, string):
return string
class Float(Model):
"""Converter for 32-bit float"""
defaultdisp = '{:f}'
pytype = float
fstring = 'f'
modelId = rim.Float
def __init__(self, bitSize):
assert bitSize == 32, f"The bitSize param of Model {self.__class__.__name__} must be 32"
super().__init__(bitSize)
self.name = f'{self.__class__.__name__}{self.bitSize}'
def fromString(self, string):
return float(string)
def minValue(self):
return -3.4e38
def maxValue(self):
return 3.4e38
class Double(Float):
fstring = 'd'
modelId = rim.Double
def __init__(self, bitSize):
assert bitSize == 64, f"The bitSize param of Model {self.__class__.__name__} must be 64"
super().__init__(bitSize)
self.name = f'{self.__class__.__name__}{self.bitSize}'
def minValue(self):
return -1.80e308
def maxValue(self):
return 1.80e308
class FloatBE(Float):
endianness = 'big'
fstring = '!f'
class DoubleBE(Double):
endianness = 'big'
fstring = '!d'
class Fixed(Model):
pytype = float
signed = True
modelId = rim.Fixed
def __init__(self, bitSize, binPoint):
super().__init__(bitSize,binPoint)
self.name = f'Fixed_{self.signed}_{self.bitSize}_{self.binPoint}'
|