#Title: SelectProteins.py
#Author: Stephen Tanner, Samuel Payne, Natalie Castellana, Pavel Pevzner, Vineet Bafna
#Created: 2005
# Copyright 2007,2008,2009 The Regents of the University of California
# All Rights Reserved
#
# Permission to use, copy, modify and distribute any part of this
# program for educational, research and non-profit purposes, by non-profit
# institutions only, without fee, and without a written agreement is hereby
# granted, provided that the above copyright notice, this paragraph and
# the following three paragraphs appear in all copies.
#
# Those desiring to incorporate this work into commercial
# products or use for commercial purposes should contact the Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE SOFTWARE PROVIDED HEREIN IS ON AN "AS IS" BASIS, AND THE UNIVERSITY
# OF CALIFORNIA HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
# ENHANCEMENTS, OR MODIFICATIONS. THE UNIVERSITY OF CALIFORNIA MAKES NO
# REPRESENTATIONS AND EXTENDS NO WARRANTIES OF ANY KIND, EITHER IMPLIED OR
# EXPRESS, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF
# THE SOFTWARE WILL NOT INFRINGE ANY PATENT, TRADEMARK OR OTHER RIGHTS.
"""
Helper class for FDRUtils.py and FDR.py:
Given an f-score cutoff, select a parsimonious collection of proteins which
account for most / all of the annotations.
"""
import os
import sys
import traceback
import struct
import ResultsParser
from Utils import *
Initialize()
class ProteinSelector(ResultsParser.ResultsParser):
def __init__(self):
self.PeptideDict = {} # aminos -> location list
self.ProteinPeptideCounts = {}
self.ProteinSpectrumCounts = {}
#self.SharedProteinPeptides = {}
#self.SharedProteinSpectra = {}
self.PeptideSpectrumCounts = {}
self.ProteinPeptides = {} # Protein -> List of aminos
self.ProteinNames = {}
self.ProteinSequences = {}
self.MQScoreWeight = 0.3
self.DeltaScoreWeight = 1.5
self.MinimumPeptideLength = 7
self.BestScoresByPeptide = {}
self.PValueCutoff = None
self.MaxFileLines = None
# if RetainRepresentativeCount is set, then we remember the
# best n spectra for a particular annotation in the dictionary
# self.BestRepresentatives
self.RetainRepresentativeCount = None
self.BestRepresentatives = {}
self.AnnotationSpectrumCounts = {}
self.FScoreCutoff2 = None
self.FScoreCutoff3 = None
self.Columns = ResultsParser.Columns()
ResultsParser.ResultsParser.__init__(self)
def FindPeptideLocations(self, Aminos):
PrevPos = -1
LocationList = []
while (1):
Pos = self.DB.find(Aminos, PrevPos + 1)
if Pos == -1:
break
# Which protein does Pos lie in?
LowIndex = 0
HighIndex = len(self.ProteinPos) - 1
# Pos >= ProteinPos[LowIndex] and Pos < ProteinPos[HighIndex]
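# Worked example (illustrative values only): with ProteinPos = [0, 120, 385],
# Pos = 200 resolves to ProteinID 1 and ResidueNumber 200 - 120 = 80.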
# Special case - last protein:
if Pos >= self.ProteinPos[HighIndex]:
ProteinID = HighIndex
ResidueNumber = Pos - self.ProteinPos[HighIndex]
else:
while (1):
if LowIndex+1==HighIndex:
ProteinID = LowIndex
ResidueNumber = Pos - self.ProteinPos[LowIndex]
break
MidIndex = (LowIndex + HighIndex) / 2
if Pos >= self.ProteinPos[MidIndex]:
LowIndex = MidIndex
else:
HighIndex = MidIndex
LocationList.append((ProteinID, ResidueNumber))
PrevPos = Pos
return LocationList
def OldFindPeptideLocations(self, Aminos):
LocationList = []
#print "Find locations for %s..."%Aminos
for (ID, Sequence) in self.ProteinSequences.items():
Pos = Sequence.find(Aminos)
if Pos != -1:
LocationList.append((ID, Pos))
#print "Found at pos %s in %s"%(Pos, ID)
if len(LocationList) == 0:
print "*** WARNING: Peptide '%s' not found in the database."%Aminos
return LocationList
def LoadDB(self, DBPath):
DBFile = open(DBPath, "rb")
self.DB = DBFile.read()
DBFile.close()
IndexPath = os.path.splitext(DBPath)[0] + ".index"
IndexFile = open(IndexPath, "rb")
BlockSize = struct.calcsize("<qi80s")
ID = 0
PrevID = None
self.ProteinPos = []
while (1):
Block = IndexFile.read(BlockSize)
if not Block:
break
Info = struct.unpack("<qi80s", Block)
Name = Info[2]
NullPos = Name.find("\0")
if NullPos != -1:
Name = Name[:NullPos]
self.ProteinNames[ID]= Name
StartPos = Info[1]
self.ProteinPos.append(StartPos)
if PrevID != None:
self.ProteinSequences[PrevID] = self.DB[self.ProteinPos[PrevID]:StartPos - 1]
PrevID = ID
ID += 1
self.ProteinSequences[PrevID] = self.DB[self.ProteinPos[PrevID]:]
def LoadMultipleDB(self, DBPathList):
"""" Given a list of DB pathnames, load all the corresponding DB """
ID = 0
self.DB = ""
self.ProteinPos = []
for DBPath in DBPathList:
print "loading %s"%DBPath
DBFile = open(DBPath, "rb")
OldDB = self.DB
self.DB += DBFile.read() # concatenate all DBs sequentially
DBFile.close()
IndexPath = os.path.splitext(DBPath)[0] + ".index"
IndexFile = open(IndexPath, "rb")
BlockSize = struct.calcsize("<qi80s")
PrevID = None
while (1):
Block = IndexFile.read(BlockSize)
if not Block:
break
Info = struct.unpack("<qi80s", Block)
Name = Info[2]
NullPos = Name.find("\0")
if NullPos != -1:
Name = Name[:NullPos]
self.ProteinNames[ID]= Name
StartPos = Info[1] + len(OldDB) # adjust StartPos for adding a new DB
self.ProteinPos.append(StartPos)
if PrevID != None:
self.ProteinSequences[PrevID] = self.DB[self.ProteinPos[PrevID]:StartPos - 1]
PrevID = ID
ID += 1
self.ProteinSequences[PrevID] = self.DB[self.ProteinPos[PrevID]:]
def OldLoadDB(self, DBPath):
"""
Load the database, populating self.ProteinSequences
"""
print "LoadDB(%s)"%DBPath
IndexPath = os.path.splitext(DBPath)[0] + ".index"
IndexFile = open(IndexPath, "rb")
DBFile = open(DBPath, "rb")
BlockSize = struct.calcsize("<qi80s")
PrevName = None
PrevID = None
PrevStartPos = None
ID = 0
while (1):
Block = IndexFile.read(BlockSize)
if not Block:
break
Info = struct.unpack("<qi80s", Block)
Name = Info[2]
NullPos = Name.find("\0")
if NullPos != -1:
Name = Name[:NullPos]
StartPos = Info[1]
self.ProteinNames[ID] = Name
if PrevName != None:
DBFile.seek(PrevStartPos)
Sequence = DBFile.read(StartPos - PrevStartPos)
Sequence = Sequence.replace("*", "")
self.ProteinSequences[PrevID] = Sequence
PrevName = Name
PrevID = ID
PrevStartPos = StartPos
ID += 1
if PrevName != None:
DBFile.seek(StartPos)
Sequence = DBFile.read()
self.ProteinSequences[PrevID] = Sequence
#self.ProteinNames[PrevID] = Name
DBFile.close()
IndexFile.close()
def ChooseProteins(self):
"""
Iteratively select proteins which account for all the peptides.
"""
self.SelectedProteins = {} # Protein -> (Peptides, Spectra)
self.PeptideProteins = {} # Peptide -> final selection of protein
print "\n\n\n"
print "CHOOSE PROTEINS:"
for (Peptide, SpectrumCount) in self.PeptideSpectrumCounts.items():
for (ProteinID, Pos) in self.PeptideDict[Peptide]:
self.ProteinSpectrumCounts[ProteinID] = self.ProteinSpectrumCounts.get(ProteinID, 0) + SpectrumCount
print "Loaded %s peptides and %s proteins"%(len(self.PeptideSpectrumCounts.keys()), len(self.ProteinSpectrumCounts.keys()))
while (1):
BestCandidate = None
BestScore = None
for Protein in self.ProteinPeptideCounts.keys():
if self.SelectedProteins.has_key(Protein):
continue
PeptideCount = self.ProteinPeptideCounts[Protein]
SpectrumCount = self.ProteinSpectrumCounts.get(Protein, 0)
Score = (PeptideCount, SpectrumCount)
#print Protein, Score
if Score > BestScore or (Score == BestScore and self.ProteinNames[Protein] < self.ProteinNames[BestCandidate]):
BestScore = Score
BestCandidate = Protein
#print "New Best %s, score %s"%(BestCandidate,BestScore)
if not BestScore:
break
(PeptideCount, SpectrumCount) = BestScore
if PeptideCount == 0:
break
#%%%
print "Accept protein %s (%s)\n Gets %s peptides, %s spectra"%(BestCandidate, self.ProteinNames[BestCandidate], PeptideCount, SpectrumCount)
self.SelectedProteins[BestCandidate] = BestScore
# Lay claim to all the (not-yet-claimed) peptides:
for Peptide in self.ProteinPeptides[BestCandidate]:
#print Peptide
if not self.PeptideProteins.has_key(Peptide):
self.PeptideProteins[Peptide] = BestCandidate
# Other proteins (if not already accepted) lose a peptide, and some spectra:
for (OtherProtein, Pos) in self.PeptideDict[Peptide]:
if self.SelectedProteins.has_key(OtherProtein):
continue
self.ProteinPeptideCounts[OtherProtein] -= 1
self.ProteinSpectrumCounts[OtherProtein] = self.ProteinSpectrumCounts.get(OtherProtein, 0) - self.PeptideSpectrumCounts[Peptide]
# Sanity check - the selected proteins have peptides, the unselected proteins have 0
for Protein in self.ProteinPeptideCounts.keys():
ProteinName = self.ProteinNames[Protein]
PeptideCount = self.ProteinPeptideCounts[Protein]
SpectrumCount = self.ProteinSpectrumCounts.get(Protein, 0)
if self.SelectedProteins.has_key(Protein) and PeptideCount <= 0:
print "** Warning: Selected protein %s (%s) has %s peptides!"%(Protein, ProteinName, PeptideCount)
if not self.SelectedProteins.has_key(Protein) and PeptideCount != 0:
print "** Warning: Unelected protein %s (%s) has %s peptides!"%(Protein, ProteinName, PeptideCount)
def ParseAnnotations(self, FileName):
"""
Parse annotations, remembering all protein locations for each peptide.
"""
print "Parse %s..."%FileName
File = open(FileName, "rb")
OldSpectrum = None
Stub = os.path.split(FileName)[1]
LineNumber = 0
for FileLine in File:
LineNumber += 1
if LineNumber % 100 == 0:
print "%s %s..."%(Stub, LineNumber)
if self.MaxFileLines != None and LineNumber >= self.MaxFileLines:
return # Quick-parse, for debugging only!
if FileLine[0] == "#":
self.Columns.initializeHeaders(FileLine)
continue
if not FileLine.strip():
continue
Bits = FileLine.split("\t")
try:
Spectrum = (Bits[self.Columns.getIndex("SpectrumFile")], Bits[self.Columns.getIndex("Scan#")])
except:
continue # header line
if Spectrum == OldSpectrum:
continue
OldSpectrum = Spectrum
try:
MQScore = float(Bits[self.Columns.getIndex("MQScore")])
DeltaScore = float(Bits[self.Columns.getIndex("DeltaScoreOther")])
Charge = int(Bits[self.Columns.getIndex("Charge")])
except:
traceback.print_exc()
print Bits
continue
# Apply a threshold: EITHER f-score cutoff (default) OR p-value cutoff
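# Worked example of the weighted f-score (illustrative numbers only): with the default
# weights MQScoreWeight = 0.3 and DeltaScoreWeight = 1.5, a charge-2 hit with
# MQScore = 2.0, DeltaScore = 0.4 and MeanDeltaScore2 = 0.5 scores
# 0.3*2.0 + 1.5*(0.4/0.5) = 1.8, which is then compared against FScoreCutoff2.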
if self.PValueCutoff != None:
try:
PValue = float(Bits[self.Columns.getIndex("InspectFDR")])
except:
traceback.print_exc()
print Bits
continue
PeptideScore = (-PValue, MQScore)
if PValue > self.PValueCutoff:
continue
else:
if Charge < 3:
WeightedScore = self.MQScoreWeight * MQScore + self.DeltaScoreWeight * (DeltaScore / self.MeanDeltaScore2)
if WeightedScore < self.FScoreCutoff2:
continue
else:
WeightedScore = self.MQScoreWeight * MQScore + self.DeltaScoreWeight * (DeltaScore / self.MeanDeltaScore3)
if WeightedScore < self.FScoreCutoff3:
continue
PeptideScore = WeightedScore
try:
Peptide = GetPeptideFromModdedName(Bits[self.Columns.getIndex("Annotation")])
except:
continue
if len(Peptide.Aminos) < self.MinimumPeptideLength:
continue
# Remember this peptide:
if not self.PeptideDict.get(Peptide.Aminos):
# It's a new peptide! Figure out where it falls in the database:
LocationList = self.FindPeptideLocations(Peptide.Aminos)
for (Protein, Pos) in LocationList:
if not self.ProteinPeptides.has_key(Protein):
self.ProteinPeptides[Protein] = []
#print "Adding peptide %s to protein %s '%s':"%(Peptide.Aminos,Protein,self.ProteinNames[Protein])
self.ProteinPeptides[Protein].append(Peptide.Aminos)
self.PeptideDict[Peptide.Aminos] = LocationList
for (ProteinNumber, Dummy) in LocationList:
self.ProteinPeptideCounts[ProteinNumber] = self.ProteinPeptideCounts.get(ProteinNumber, 0) + 1
else:
# We've seen this peptide before:
LocationList = self.PeptideDict[Peptide.Aminos]
OldScore = self.BestScoresByPeptide.get(Peptide.Aminos, -9999)
self.BestScoresByPeptide[Peptide.Aminos] = max(PeptideScore, OldScore)
self.PeptideSpectrumCounts[Peptide.Aminos] = self.PeptideSpectrumCounts.get(Peptide.Aminos, 0) + 1
##############################################################
# Populate self.BestRepresentative, if requested:
if self.RetainRepresentativeCount:
Peptide.MQScore = MQScore
Peptide.PValue = PValue
Peptide.SpectrumFilePath = Bits[0]
Peptide.ScanNumber = int(Bits[1])
Peptide.SpectrumFilePos = int(Bits[self.Columns.getIndex("SpecFilePos")])
Key = Peptide.GetFullModdedName()
RepresentativeList = self.BestRepresentatives.get(Key, [])
Tuple = (PeptideScore, Peptide)
RepresentativeList.append(Tuple)
RepresentativeList.sort()
self.BestRepresentatives[Key] = RepresentativeList[-self.RetainRepresentativeCount:]
self.AnnotationSpectrumCounts[Key] = self.AnnotationSpectrumCounts.get(Key, 0) + 1
if __name__ == "__main__":
# Test
Bob = ProteinSelector()
Bob.LoadDB("database\DictyCommon.Aug28.FS2.trie")
print Bob.FindPeptideLocations("GTVESEMAEQDSLLNKLNK")
print Bob.FindPeptideLocations("TSEGDFTLLLGQIVDNQIGDLNKSG")
print Bob.FindPeptideLocations("YAVFAPGLADVVIEVVAK")
|
# TPPM wrapper
# Copyright 2021 kensoi
# works with projects that have a root.py file generated via TPPM
# put this file into the folder where you keep all your projects. It should look like this:
# your_dir/
# manage.py <-- TPPM wrapper
# botName1/
# assets/
# library/
# root.py
# botName2/
# assets/
# library/
# root.py
# $ python ./manage.py
import subprocess
projects = ['kyokou', 'kyokou2'] # list of projects by project folder name
subprocess.run(f'python -m testcanarybot --run {" ".join(projects)}'.split())
|
from machamp.models.machamp_model import MachampModel
from machamp.models.sentence_decoder import MachampClassifier
from machamp.models.tag_decoder import MachampTagger
from machamp.models.dependency_decoder import MachampBiaffineDependencyParser
from machamp.models.mlm_decoder import MachampMaskedLanguageModel
from machamp.models.multiseq_decoder import MachampMultiTagger
from machamp.models.crf_decoder import MachampCrfTagger
from machamp.models.seq2seq_decoder import MachampSeq2SeqDecoder
|
from __future__ import division
from gltbx import wx_viewer
import wx
from gltbx.gl import *
from scitbx.math import minimum_covering_sphere
from scitbx.array_family import flex
class MyGLWindow(wx_viewer.show_points_and_lines_mixin):
def __init__(self, *args, **kwds):
super(MyGLWindow, self).__init__(*args, **kwds)
self.points = flex.vec3_double([ (-5,-5,-5), (-4,0,0), (0,-8,0), (0,0,-11) ])
self.line_i_seqs = [ (0,1), (0,2), (0,3), (1,2), (1,3), (2,3) ]
self.spheres = [ ((0,0,0), 1) ]
self.flag_show_minimum_covering_sphere = False
self.minimum_covering_sphere = minimum_covering_sphere(self.points)
class MyApp(wx_viewer.App):
def init_view_objects(self):
box = wx.BoxSizer(wx.VERTICAL)
self.view_objects = MyGLWindow(self.frame, size=(600,600))
box.Add(self.view_objects, wx.EXPAND, wx.EXPAND)
self.frame.SetSizer(box)
box.SetSizeHints(self.frame)
if __name__ == '__main__':
a = MyApp(title="An example of using gltbx.wx_viewer")
a.MainLoop()
|
import json
import re
from unidecode import unidecode
def load_jsonl(filename):
examples = []
with open(filename) as f:
for line in f:
_example = json.loads(line)
examples.append(_example)
return examples
def load_jsonl_table(filename):
tables = dict()
with open(filename) as f:
for line in f:
_table = json.loads(line)
tables[_table["id"]] = _table
return tables
def normalize_string(string: str) -> str:
"""
These are the transformation rules used to normalize cell in column names in Sempre. See
``edu.stanford.nlp.sempre.tables.StringNormalizationUtils.characterNormalize`` and
``edu.stanford.nlp.sempre.tables.TableTypeSystem.canonicalizeName``. We reproduce those
rules here to normalize and canonicalize cells and columns in the same way so that we can
match them against constants in logical forms appropriately.
"""
# Normalization rules from Sempre
# \u201A -> ,
string = unidecode(string.lower())
string = re.sub("‚", ",", string)
string = re.sub("„", ",,", string)
string = re.sub("[·・]", ".", string)
string = re.sub("…", "...", string)
string = re.sub("ˆ", "^", string)
string = re.sub("˜", "~", string)
string = re.sub("‹", "<", string)
string = re.sub("›", ">", string)
string = re.sub("[‘’´`]", "'", string)
string = re.sub("[“”«»]", '"', string)
string = re.sub("[•†‡²³]", "", string)
string = re.sub("[‐‑–—−]", "-", string)
string = re.sub("[ðø′″€⁄ªΣ]", "_", string)
string = re.sub("[\\u0180-\\u0210]", "", string).strip()
string = re.sub("[\\u0220-\\uFFFF]", "", string).strip()
string = string.replace("\\n", "_")
string = re.sub("\\s+", " ", string)
# Canonicalization rules from Sempre.
string = re.sub("[^\\w]", "_", string)
string = re.sub("_+", "_", string)
string = re.sub("_$", "", string)
return string.strip("_")
|
from .visualwakewords import VisualWakeWords
|
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Generate initial configuration for packit
"""
import logging
from os import getcwd
from pathlib import Path
import click
from packit.cli.types import LocalProjectParameter
from packit.cli.utils import cover_packit_exception
from packit.config import get_context_settings
from packit.constants import CONFIG_FILE_NAMES, PACKIT_CONFIG_TEMPLATE
from packit.exceptions import PackitException
logger = logging.getLogger(__file__)
@click.command("generate", context_settings=get_context_settings())
@click.argument("path_or_url", type=LocalProjectParameter(), default=getcwd())
@click.option(
"-f", "--force", is_flag=True, help="Reset config to default if already exists."
)
@cover_packit_exception
def generate(path_or_url, force):
"""
Generate new packit config.
"""
# find name of config file if already exists
config_file_name = None
for existing_config_file in CONFIG_FILE_NAMES:
if (Path.cwd() / existing_config_file).is_file():
config_file_name = existing_config_file
if not force:
raise PackitException(
f"Packit config {config_file_name} already exists."
" If you want to regenerate it use `packit generate --force`"
)
break
template_data = {
"upstream_project_name": path_or_url.repo_name,
"downstream_package_name": path_or_url.repo_name,
}
generate_config(
write_to_file=True,
template_data=template_data,
config_file_name=config_file_name,
)
def generate_config(write_to_file=False, template_data=None, config_file_name=None):
"""
Generate config file from provided data
:param write_to_file: bool, False by default
:param template_data: dict, example:
{
"upstream_project_name": "packitos",
"downstream_package_name": "packit",
}
:param config_file_name: str, name of config file, `.packit.yaml` by default
:return: str, generated config
"""
# default name
config_file_name = config_file_name or ".packit.yaml"
output_config = PACKIT_CONFIG_TEMPLATE.format(
downstream_package_name=template_data["downstream_package_name"],
upstream_project_name=template_data["upstream_project_name"],
)
if write_to_file:
Path(config_file_name).write_text(output_config)
return output_config
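# Example (a sketch; PACKIT_CONFIG_TEMPLATE only needs the two fields shown in the
# docstring above):
#   generate_config(
#       write_to_file=False,
#       template_data={"upstream_project_name": "packitos",
#                      "downstream_package_name": "packit"},
#   )
# returns the rendered config as a string without touching the filesystem.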
|
from helper import get_input, flatten
def get_neighbours(pos, data):
row = pos[0]
col = pos[1]
height = pos[2]
neighbours = []
for r in range(-1, 2):
for c in range(-1, 2):
for h in range(-1, 2):
if r == 0 and c == 0 and h == 0:
continue
if (row + r, col + c, height + h) in data:
neighbours.append((row + r, col + c, height + h))
return neighbours
def print_data(data):
row_pos = [x[0] for x in data]
col_pos = [x[1] for x in data]
height_pos = [x[2] for x in data]
for h in range(min(height_pos), max(height_pos) + 1):
print(f'z={h}')
for r in range(min(row_pos), max(row_pos) + 1):
for c in range(min(col_pos), max(col_pos) + 1):
if (r, c, h) in data:
print('#', end='')
else:
print('.', end='')
print('\n', end='')
print('\n', end='')
def main():
input_data = get_input(17)
data = flatten([[(i, j, 0) for j, y in enumerate(list(x)) if y == '#'] for i, x in enumerate(input_data.split('\n')) if x])
print_data(data)
for _ in range(6):
new_data = []
row_pos = [x[0] for x in data]
col_pos = [x[1] for x in data]
height_pos = [x[2] for x in data]
for r in range(min(row_pos)-1, max(row_pos)+2):
for c in range(min(col_pos)-1, max(col_pos)+2):
for h in range(min(height_pos)-1, max(height_pos)+2):
pos = (r, c, h)
neighbours = get_neighbours(pos, data)
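# Conway Cubes rules: an active cube stays active with 2 or 3 active
# neighbours; an inactive cube becomes active with exactly 3.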
if pos in data and (len(neighbours) == 2 or len(neighbours) == 3):
new_data.append(pos)
if pos not in data and len(neighbours) == 3:
new_data.append(pos)
data = new_data
# print_data(data)
print(len(data))
if __name__ == '__main__':
main()
|
from make_dataframe import make_df
df = make_df()
print(df.dtypes)
print(df.to_markdown())
|
import logging
from pip._internal.cli.base_command import Command
class FakeCommand(Command):
name = 'fake'
summary = name
def __init__(self, error=False):
self.error = error
super(FakeCommand, self).__init__()
def main(self, args):
args.append("--disable-pip-version-check")
return super(FakeCommand, self).main(args)
def run(self, options, args):
logging.getLogger("pip.tests").info("fake")
if self.error:
raise SystemExit(1)
class FakeCommandWithUnicode(FakeCommand):
name = 'fake_unicode'
summary = name
def run(self, options, args):
logging.getLogger("pip.tests").info(b"bytes here \xE9")
logging.getLogger("pip.tests").info(
b"unicode here \xC3\xA9".decode("utf-8")
)
class Test_base_command_logging(object):
"""
Test `pip.base_command.Command` setting up logging consumers based on
options
"""
def test_log_command_success(self, tmpdir):
"""
Test the --log option logs when command succeeds
"""
cmd = FakeCommand()
log_path = tmpdir.join('log')
cmd.main(['fake', '--log', log_path])
with open(log_path) as f:
assert 'fake' == f.read().strip()[:4]
def test_log_command_error(self, tmpdir):
"""
Test the --log option logs when command fails
"""
cmd = FakeCommand(error=True)
log_path = tmpdir.join('log')
cmd.main(['fake', '--log', log_path])
with open(log_path) as f:
assert 'fake' == f.read().strip()[:4]
def test_log_file_command_error(self, tmpdir):
"""
Test the --log-file option logs (when there's an error).
"""
cmd = FakeCommand(error=True)
log_file_path = tmpdir.join('log_file')
cmd.main(['fake', '--log-file', log_file_path])
with open(log_file_path) as f:
assert 'fake' == f.read().strip()[:4]
def test_unicode_messages(self, tmpdir):
"""
Tests that logging bytestrings and unicode objects don't break logging
"""
cmd = FakeCommandWithUnicode()
log_path = tmpdir.join('log')
cmd.main(['fake_unicode', '--log', log_path])
|
import logging
import subprocess
import os
def read_bam(bamfilePath):
"""Reads a .bam line by line"""
logging.debug(
'[DEBUG] (read_bam) Launching samtools in subprocess to yield the lines of the bamfile {}'.format(bamfilePath))
# Samtools must be installed !
cmd = 'samtools view '+os.path.realpath(bamfilePath)
logging.debug('[DEBUG] (read_bam) cmd = {}'.format(cmd))
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE) # No .sam dropping
logging.debug(
'[DEBUG] (read_bam) Starting to read output from samtools for {}'.format(bamfilePath))
while True:
# We will read the output of samtools line by line. Decoding is needed because popen returns bytes, not strings
line = proc.stdout.readline().decode('utf-8')
if line: # While there is something to read (else it will break)
yield line
else: # There is nothing left in the stdout of samtools
logging.debug(
'[DEBUG] (read_bam) reading bamfile {} seems over'.format(bamfilePath))
break
return
def read_header(bamfilePath):
""" returns the header as a python-string .sam"""
logging.debug("[DEBUG] Reading headers of {}".format(bamfilePath))
samtxt = ''
cmd = 'samtools view -h -S '+str(bamfilePath)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stdin=subprocess.PIPE) # We launch samtools view
while True:
output = proc.stdout.readline().decode('utf-8')
if output.startswith('@'):
samtxt = samtxt + output
else:
break
return samtxt
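# Example usage (a sketch; the path is hypothetical and samtools must be on the PATH):
#   for line in read_bam("/data/sample.bam"):
#       fields = line.rstrip("\n").split("\t")   # standard SAM columns
#   header = read_header("/data/sample.bam")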
|
import copy
import logging
import random
import numpy as np
import torch
from matminer.featurizers.composition import ElementProperty
from sklearn.feature_selection import VarianceThreshold
from torch.utils.data import Dataset
from tqdm import tqdm
from cacgan.data.schema import AtmoStructureGroup, FormulaEncoder, pd
from cacgan.utils import Composition, chunks, convert_mno
from cacgan.utils import composition_elements, diff2formula, SEED
def feat_compositions(feater: ElementProperty, comps: [Composition]) -> np.ndarray:
"""use matminer ElementProperty to generate an array of features for a list of compositions"""
feat_array = np.zeros((len(comps), len(feater.feature_labels())))
for i, c in tqdm(enumerate(comps)):
feat_array[i] = feater.featurize(c.fractional_composition)
return feat_array
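# Example (a sketch, assuming Composition behaves like pymatgen's Composition):
#   feater = ElementProperty.from_preset("magpie")
#   feats = feat_compositions(feater, [Composition("Fe2O3"), Composition("LiCoO2")])
#   feats.shape == (2, len(feater.feature_labels()))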
def variance_threshold_selector(data: pd.DataFrame, threshold=1e-5) -> pd.DataFrame:
"""remove columns with a variance less than threshold form a pd.DataFrame"""
selector = VarianceThreshold(threshold)
selector.fit(data)
return data[data.columns[selector.get_support(indices=True)]]
def rm_nan_columns(a: np.ndarray, m="any") -> np.ndarray:
"""remove nan values from an np.ndarray"""
if m == "any":
return a[:, ~np.isnan(a).any(axis=0)]
elif m == "all":
return a[:, ~np.isnan(a).all(axis=0)]
else:
raise ValueError("m must be any or all!")
class GroupAB:
def __init__(self, group_a: AtmoStructureGroup, group_b: AtmoStructureGroup, ):
"""
select two AtmoStructureGroup objs to build up dataset
"""
self.group_a = group_a
self.group_b = group_b
# elements appear in at least one group
self.possible_elements = sorted(set(self.group_a.possible_elements + self.group_b.possible_elements))
self.total_chem_pairs = []
self.total_alchemy_pairs = []
# i2js[i] would return a *set* of all possible j paired to i
self.i2js = dict()
self.j2is = dict()
for i, a in enumerate(self.group_a):
ea = composition_elements(a.composition)
for j, b in enumerate(self.group_b):
eb = composition_elements(b.composition)
if ea == eb:
self.total_chem_pairs.append((i, j))
if i not in self.i2js:
self.i2js[i] = {j, }
else:
self.i2js[i].add(j)
if j not in self.j2is:
self.j2is[j] = {i, }
else:
self.j2is[j].add(i)
else:
self.total_alchemy_pairs.append((i, j))
self.a_haspair = sorted(set([p[0] for p in self.total_chem_pairs]))
self.b_haspair = sorted(set([p[1] for p in self.total_chem_pairs]))
self.a_single = [i for i in range(len(self.group_a)) if i not in self.a_haspair]
self.b_single = [i for i in range(len(self.group_b)) if i not in self.b_haspair]
# # you don't need this
# random.shuffle(self.total_chem_pairs)
# random.shuffle(self.total_alchemy_pairs)
self.encoder = FormulaEncoder(self.possible_elements)
self.group_a.encoded = self.encoder.encode_2d(
[a.composition.fractional_composition.formula for a in self.group_a])
self.group_b.encoded = self.encoder.encode_2d(
[a.composition.fractional_composition.formula for a in self.group_b])
self.Avalues = self.group_a.encoded.copy()
self.Bvalues = self.group_b.encoded.copy()
class FormulaDataset(Dataset):
def __init__(self, gab: GroupAB, mo: bool = False, mno: bool = False):
"""
dataset passed to pytorch dataloader
:param gab: the GroupAB object after selecting two groups of ATMO
:param mo: should we use MO composition only?
:param mno: only keep composition of 1M 1NM
"""
self.gab = gab
if mo:
ga = self.gab.group_a.to_mocomp()
gb = self.gab.group_b.to_mocomp()
self.gab = GroupAB(ga, gb)
if mno:
ga = self.gab.group_a.keep_only_mno()
gb = self.gab.group_b.keep_only_mno()
self.gab = GroupAB(ga, gb)
self.convertmno = None # every time an AB pair is drawn, B is converted to the chem sys of A, i.e. "fake" pairing
self.setup_params = None
self.mode = None
def convert_mno(self):
"""set dataset to include only mno structures"""
self.convertmno = True
def train(self):
"""set dataset to training mode"""
assert self.setup_params is not None
self.mode = "train"
def test(self):
"""set dataset to training mode"""
assert self.setup_params is not None
self.mode = "test"
def as_train(self):
"""return a copy of the data set and set it to training mode"""
d = copy.deepcopy(self)
d.train()
return d
def as_test(self):
"""return a copy of the data set and set it to test mode"""
d = copy.deepcopy(self)
d.test()
return d
def setup(
self,
length_multiplier=1.0,
a_haspair_train=1.0,
b_haspair_train=0.66,
a_single_train=1.0,
b_single_train=1.0,
setup_seed=SEED,
b_cv_fold=3,
b_cv_index=0,
b_cv=True,
b_cv_rev=False
):
"""
split train/valid; b_haspair_train should be < 1, otherwise there would be no data in valid
when a_haspair_train < 1, we split both A and B, that is,
For A
in train -- a_haspair_train + a_single_train
in valid -- a_haspair_valid = (1 - a_haspair_train)
For B
in train -- b_haspair_train + b_single_train
in valid -- b_haspair_valid = (1 - b_haspair_train)
the question becomes if we can generate unseen B from unseen A, which sounds a bit weird...
for validation we can only use pairs from a_haspair_valid and b_haspair_valid, which may be very few,
when a_haspair_train = 1, the same set of A samples are used for train and valid, that is,
For A
in train -- a_haspair + a_single_train
in valid -- a_haspair
For B (this stays the same)
in train -- b_haspair_train + b_single_train
in valid -- b_haspair_valid = (1 - b_haspair_train)
the question becomes if we can generate unseen B from already seen A
this can guarantee that we are not predicting an unseen chemical system (even we saw it thru A)
"""
assert 0 <= a_haspair_train <= 1
assert 0 <= b_haspair_train < 1
assert 0 <= a_single_train <= 1
assert 0 <= b_single_train <= 1
self.length_multiplier = length_multiplier
random.seed(setup_seed) # remember shuffle is in-place
self.train_a_single = random.sample(self.gab.a_single, int(len(self.gab.a_single) * a_single_train))
self.train_a_haspair = random.sample(self.gab.a_haspair, int(len(self.gab.a_haspair) * a_haspair_train))
self.train_a = self.train_a_single + self.train_a_haspair
if a_haspair_train < 1:
self.test_a = [i for i in self.gab.a_haspair if i not in self.train_a_haspair]
self.test_a = random.sample(self.test_a, k=len(self.test_a))
else:
assert a_haspair_train == 1
self.test_a = random.sample(self.train_a_haspair, k=len(self.train_a_haspair))
self.train_b_single = random.sample(self.gab.b_single, int(len(self.gab.b_single) * b_single_train))
if b_cv:
assert b_cv_index in range(b_cv_fold)
b_has_pair = [j for j in self.gab.b_haspair]
# print("b_has_pair", self.gab.b_haspair)
random.Random(setup_seed).shuffle(b_has_pair)
# print("b_has_pair shuffled", b_has_pair)
b_haspair_chunks = list(chunks(b_has_pair, len(b_has_pair) // b_cv_fold))
if len(b_has_pair) % b_cv_fold > 0:
b_haspair_chunks[-2] = b_haspair_chunks[-2] + b_haspair_chunks[-1]
b_haspair_chunks.pop(-1)
if b_cv_rev:
self.train_b_haspair = b_haspair_chunks[b_cv_index]
else:
self.train_b_haspair = []
for i in range(b_cv_fold):
if i != b_cv_index:
self.train_b_haspair += b_haspair_chunks[i]
else:
random.seed(b_cv_index)
self.train_b_haspair = random.sample(self.gab.b_haspair, int(len(self.gab.b_haspair) * b_haspair_train))
random.seed(setup_seed)
self.train_b = self.train_b_single + self.train_b_haspair
self.test_b = [i for i in self.gab.b_haspair if i not in self.train_b_haspair]
self.pairs_in_test = []
for j in self.test_b:
i_in_test_pool = set(self.test_a).intersection(self.gab.j2is[j])
for i in i_in_test_pool:
self.pairs_in_test.append((i, j))
self.test_apool = [p[0] for p in self.pairs_in_test]
self.test_bpool = [p[1] for p in self.pairs_in_test]
self.train_apool = self.train_a
self.train_bpool = self.train_b
random.shuffle(self.train_apool) # these shouldn't matter...
random.shuffle(self.train_bpool)
self.setup_params = dict(
length_multiplier=length_multiplier,
a_haspair_train=a_haspair_train,
b_haspair_train=b_haspair_train,
a_single_train=a_single_train,
b_single_train=b_single_train,
seed=SEED,
b_cv_fold=b_cv_fold,
b_cv_index=b_cv_index,
b_cv=b_cv,
)
self.setup_params["details"] = self.details
test_bpool_refcodes = [self.gab.group_b[j].identifier for j in self.test_bpool]
self.setup_params["test_bpool_refcodes"] = test_bpool_refcodes
@property
def details(self):
s = "=== meta ===\n"
s += "A group size: {}\n".format(len(self.gab.group_a))
s += "B group size: {}\n".format(len(self.gab.group_b))
s += "A group amine: {}\n".format(self.gab.group_a.first_amine)
s += "B group amine: {}\n".format(self.gab.group_b.first_amine)
s += "total chem pairs: {}\n".format(len(self.gab.total_chem_pairs))
s += "total alchemy pairs: {}\n".format(len(self.gab.total_alchemy_pairs))
s += "=== dataset ===\n"
s += "train a single: {}\n".format(len(self.train_a_single))
s += "train b single: {}\n".format(len(self.train_b_single))
s += "train a haspair: {}\n".format(len(self.train_a_haspair))
s += "train b haspair: {}\n".format(len(self.train_b_haspair))
s += "train a pool: {}\n".format(len(self.train_apool))
s += "train b pool: {}\n".format(len(self.train_bpool))
s += "test a: {}\n".format(len(self.test_a))
s += "test b: {}\n".format(len(self.test_b))
s += "test pairs: {}\n".format(len(self.pairs_in_test))
return s
def __getitem__(self, index):
assert self.mode in ("train", "test",)
if self.mode == "train":
i = self.train_apool[index % len(self.train_apool)]
j = random.choice(self.train_bpool)
else:
i = self.test_apool[index % len(self.test_apool)]
j = self.test_bpool[index % len(self.test_bpool)]
Avalue = self.gab.Avalues[i]
if self.convertmno and self.mode == "train":
Bvalue = convert_mno(Avalue, self.gab.Bvalues[j], self.gab.possible_elements)
else:
Bvalue = self.gab.Bvalues[j]
Acomp = self.gab.group_a[i].composition.formula
Bcomp = self.gab.group_b[j].composition.formula
item_A = torch.from_numpy(Avalue) # .to(DEVICE)
item_B = torch.from_numpy(Bvalue) # .to(DEVICE)
d = {
'A': item_A,
'B': item_B,
'Acomp': Acomp,
'Bcomp': Bcomp,
}
return d
def __len__(self):
assert self.mode in ("train", "test",)
if self.mode == "train":
return int(len(self.train_apool) * self.length_multiplier)
else:
return len(self.test_apool)
def get_baseline(self):
"""only implemented this for a_haspair_train = 1"""
assert len(self.train_a_haspair) == len(self.gab.a_haspair)
baselines = []
# for each unique j in test, what is the closest a we have seen?
for j in self.test_b:
bformula = self.gab.group_b[j].composition.formula
aformulae = [self.gab.group_a[i].composition.formula for i in self.gab.j2is[j]]
deltas = [diff2formula(aformula, bformula) for aformula in aformulae]
baselines.append(min(deltas))
return baselines
class DimDataset:
def __init__(self, x: pd.DataFrame, y: pd.DataFrame, hx=None, hy=None):
"""
dataset used for dimensionality prediction
"""
self.x = x
self.y = y
self.hx = hx
self.hy = hy
def holdout(self, exclude_refcode: [str]):
"""
:param exclude_refcode: to exclude a list of structures and put them in holdout data/target
"""
assert len(exclude_refcode) > 0
refcodes = self.x.index.tolist()
hx = self.x.loc[exclude_refcode]
hy = self.y.loc[exclude_refcode]
remaining = list(set(refcodes).difference(set(exclude_refcode)))
x = self.x.loc[remaining]
y = self.y.loc[remaining]
self.x = x
self.y = y
self.hx = hx
self.hy = hy
@classmethod
def from_atmogroup(cls, atmo_group: AtmoStructureGroup, feat=False, mocomp=False):
"""create dimensionality dataset from an AtmoStructureGroup"""
if mocomp:
structures = atmo_group.to_mocomp().structures
else:
structures = atmo_group.structures
fc = FormulaEncoder(AtmoStructureGroup(structures).possible_elements)
comp = fc.encode_2d([a.composition.fractional_composition.formula for a in structures])
data_df = pd.DataFrame(comp, columns=fc.possible_elements)
dim_df = pd.DataFrame([a.dim for a in structures], columns=["dimension"])
refcodes = [a.identifier for a in structures]
data_df.index = refcodes
dim_df.index = refcodes
if feat:
feat_arrays = []
columns = []
for name in ["magpie", "matscholar_el", "matminer"]:
epfeat = ElementProperty.from_preset(name)
farray = feat_compositions(epfeat, [s.composition for s in structures])
feat_arrays.append(farray)
columns += [n for n in epfeat.feature_labels()]
feat_array = np.hstack(feat_arrays)
logging.warning("generated feature array shape: {}".format(feat_array.shape))
feat_df = pd.DataFrame(feat_array, columns=columns)
feat_df = feat_df.dropna(axis=1, how="any")
logging.warning("after removing nan columns: {}".format(feat_array.shape))
feat_df = variance_threshold_selector(feat_df, 1e-5)
logging.warning("after removing low variance columns: {}".format(feat_array.shape))
data_df = pd.concat([data_df, feat_df], axis=1)
return cls(data_df, dim_df)
|
import numpy as np
from skimage import measure as measure
def keep_largest_connected_components(mask, n_classes):
'''
Keeps only the largest connected components of each label for a segmentation mask.
'''
out_img = np.zeros(mask.shape, dtype=np.uint8)
for struc_id in np.arange(1, n_classes):
binary_img = mask == struc_id
blobs = measure.label(binary_img, connectivity=1)
props = measure.regionprops(blobs)
if not props:
continue
area = [ele.area for ele in props]
largest_blob_ind = np.argmax(area)
largest_blob_label = props[largest_blob_ind].label
out_img[blobs == largest_blob_label] = struc_id
return out_img
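# Worked example (an added sketch):
#   mask = np.array([[1, 1, 0, 1],
#                    [1, 0, 0, 0]], dtype=np.uint8)
#   keep_largest_connected_components(mask, n_classes=2)
# keeps only the three-pixel component of label 1:
#   array([[1, 1, 0, 0],
#          [1, 0, 0, 0]], dtype=uint8)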
|
from __future__ import print_function
from future.utils import iteritems
from xgcm.grid import Axis, raw_interp_function
import xarray as xr
def generate_axis(
ds,
axis,
name,
axis_dim,
pos_from="center",
pos_to="left",
boundary_discontinuity=None,
pad="auto",
new_name=None,
attrs_from_scratch=True,
):
"""
Creates c-grid dimensions (or coordinates) along an axis of the input dataset.
Parameters
----------
ds : xarray.Dataset
Dataset with grid information used to construct the c-grid
axis : str
The appropriate xgcm axis. E.g. 'X' for longitudes.
name : str
The name of the variable in ds, providing the original grid.
axis_dim : str
The dimension of ds[name] corresponding to axis. If name itself is a
dimension, this should be equal to name.
pos_from : {'center','left','right'}, optional
Position of the gridpoints given in 'ds'.
pos_to : {'left','center','right'}, optional
Position of the gridpoints to be generated.
boundary_discontinuity : {None, float}, optional
If specified, marks the value of discontinuity across boundary, e.g.
360 for global longitude values and 180 for global latitudes.
pad : {'auto', None, float}, optional
If specified, determines the padding to be applied across boundary.
If float is specified, that value is used as padding. Auto attempts to
pad linearly extrapolated values. Can be useful for e.g. depth
coordinates (to reconstruct 0 depth). Can lead to unexpected values
when coordinate is multidimensional.
new_name : str, optional
Name of the inferred grid variable. Defaults to name+'_'+pos_to
attrs_from_scratch : bool, optional
Determines if the attributes are created from scratch. Should be
enabled for dimensions and deactivated for multidimensional
coordinates. These can only be calculated after the dims are created.
"""
if not isinstance(ds, xr.Dataset):
raise ValueError("'ds' needs to be xarray.Dataset")
if new_name is None:
new_name = name + "_" + pos_to
# Determine the relative position to interpolate to based on current and
# desired position
relative_pos_to = _position_to_relative(pos_from, pos_to)
# This is bloated. We can probably retire the 'auto' logic in favor of
# using 'boundary' and 'fill_value'. But first lets see if this all works.
if (boundary_discontinuity is not None) and (pad is not None):
raise ValueError(
"Coordinate cannot be wrapped and padded at the\
same time"
)
elif (boundary_discontinuity is None) and (pad is None):
raise ValueError(
'Either "boundary_discontinuity" or "pad" have \
to be specified'
)
if pad is None:
fill_value = 0.0
boundary = None
periodic = True
elif pad == "auto":
fill_value = 0.0
boundary = "extrapolate"
periodic = False
else:
fill_value = pad
boundary = "fill"
periodic = False
kwargs = dict(
boundary_discontinuity=boundary_discontinuity,
fill_value=fill_value,
boundary=boundary,
position_check=False,
)
ds = ds.copy()
# For a set of coordinates there are two fundamental cases. The coordinates
# are (a) one dimensional (dimensions) or (b) multidimensional. These are
# separated by the keyword attrs_from_scratch.
# These two cases are treated differently because for each dataset we need
# to recreate all (a) cases before we can proceed to (b), hence this is
# really the 'raw' data processing step. If we have working one dimensional
# coordinates (e.g. after we looped over the axes_dims_dict, we can use the
# regular xgcm.Axis to interpolate multidimensional coordinates.
# This assures that any changes to the Axis.interp method can directly
# propagate to this module.
if attrs_from_scratch:
# Input coordinate has to be declared as center,
# or xgcm.Axis throws error. Will be rewrapped below.
ds[name] = _fill_attrs(ds[name], "center", axis)
ax = Axis(ds, axis, periodic=periodic)
args = ds[name], raw_interp_function, relative_pos_to
ds.coords[new_name] = ax._neighbor_binary_func_raw(*args, **kwargs)
# Place the correct attributes
ds[name] = _fill_attrs(ds[name], pos_from, axis)
ds[new_name] = _fill_attrs(ds[new_name], pos_to, axis)
else:
kwargs.pop("position_check", None)
ax = Axis(ds, axis, periodic=periodic)
args = ds[name], pos_to
ds.coords[new_name] = ax.interp(*args, **kwargs)
return ds
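# Example (a sketch): infer left-edge longitudes from 1D cell-center longitudes,
# wrapping across the 360-degree discontinuity:
#   ds = generate_axis(ds, "X", "lon", "lon",
#                      pos_from="center", pos_to="left",
#                      boundary_discontinuity=360, pad=None)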
def generate_grid_ds(
ds,
axes_dims_dict,
axes_coords_dict=None,
position=None,
boundary_discontinuity=None,
pad="auto",
new_name=None,
):
"""
Add c-grid dimensions and coordinates (optional) to observational Dataset
Parameters
----------
ds : xarray.Dataset
Dataset with grid information used to construct the c-grid
axes_dims_dict : dict
Dict with information on the dimension in ds corresponding to the xgcm
axis. E.g. {'X':'lon','Y':'lat'}
axes_coords_dict : dict, optional
Dict with information on the coordinates in ds corresponding to the
xgcm axis. E.g. {'X':'geolon','Y':'geolat'}
position : {None,tuple, dict}, optional
Position of the gridpoints given in 'ds' and the desired position to be
generated. Defaults to ('center','left'). Can be a tuple like
('center','left'), or a dict with corresponding axes
(e.g. {'X':('center','left'),'Z':('left','center')})
boundary_discontinuity : {None, float, dict}, optional
Specifies the discontinuity at the boundary to wrap e.g. longitudes
without artifacts. Can be defined globally (for all fields defined in
axes_dims_dict and axes_coords_dict) {float, None} or per dataset
variable (dict e.g. {'longitude':360,'latitude':180})
pad : {'auto', None, float}, optional
Specifies the padding at the boundary to extend values past the boundary.
Can be defined globally (for all fields defined in
axes_dims_dict and axes_coords_dict) {float, None} or per dataset
variable ({dict} e.g. {'z':'auto','latitude':0.0})
new_name : str, optional
Name of the inferred grid variable. Defaults to name+'_'+position[1]
"""
if axes_coords_dict is not None:
combo_dict = [axes_dims_dict, axes_coords_dict]
else:
combo_dict = [axes_dims_dict]
for di, dd in enumerate(combo_dict):
if di == 0:
attrs_from_scratch = True
infer_dim = False
elif di == 1:
attrs_from_scratch = False
infer_dim = True
for ax in dd.keys():
# Get variable name
ax_v = dd[ax]
# Get dimension name
if infer_dim:
ax_d = axes_dims_dict[ax]
else:
ax_d = ax_v
# Parse position
pos_from, pos_to = _parse_position(position, ax)
# Pass wrap characteristics
is_discontinous = _parse_boundary_params(boundary_discontinuity, ax_v)
# Pass pad characteristics
is_padded = _parse_boundary_params(pad, ax_v)
ds = generate_axis(
ds,
ax,
ax_v,
ax_d,
pos_from=pos_from,
pos_to=pos_to,
boundary_discontinuity=is_discontinous,
pad=is_padded,
new_name=new_name,
attrs_from_scratch=attrs_from_scratch,
)
return ds
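# Example (a sketch, reusing the dict shapes from the docstring): with the default
# ("center", "left") positions and 'auto' (extrapolating) padding,
#   ds_full = generate_grid_ds(ds, {"X": "lon", "Y": "lat"})
# adds "lon_left" and "lat_left" dimensions to the dataset.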
def _parse_boundary_params(in_val, varname):
"""Parse boundary_discontinuity or pad parameters"""
if isinstance(in_val, dict):
try:
is_valued = in_val[varname]
except KeyError:
# Set defaults
is_valued = None
else:
is_valued = in_val
return is_valued
def _parse_position(position, axname, pos_default=("center", "left")):
if isinstance(position, dict):
try:
pos_from = position[axname][0]
except KeyError:
pos_from = pos_default[0]
try:
pos_to = position[axname][1]
except KeyError:
pos_to = pos_default[1]
elif isinstance(position, tuple):
pos_from = position[0]
pos_to = position[1]
else:
# Set defaults
pos_from = pos_default[0]
pos_to = pos_default[1]
return pos_from, pos_to
def _position_to_relative(pos_from, pos_to):
"""Translate from to positions in relative movement"""
if (pos_from == "left" and pos_to == "center") or (
pos_from == "center" and pos_to == "right"
):
to = "right"
elif (pos_from == "center" and pos_to == "left") or (
pos_from == "right" and pos_to == "center"
):
to = "left"
elif pos_from == "center" and pos_to == "outer":
to = "outer"
elif pos_from == "center" and pos_to == "inner":
to = "inner"
else:
raise RuntimeError(
"Cannot infer '%s' coordinates \
from '%s'"
% (pos_to, pos_from)
)
return to
def _fill_attrs(da, pos, axis):
"""Replace comdo attributes according to pos and axis"""
attrs = da.attrs
attrs["axis"] = axis
if pos == "center":
attrs.pop("c_grid_axis_shift", None)
elif pos in ["left", "outer"]:
attrs["c_grid_axis_shift"] = -0.5
elif pos in ["right", "inner"]:
attrs["c_grid_axis_shift"] = 0.5
da.attrs = attrs
return da
|
from typing import Union
class Vocab(object):
def __init__(self,
vocab_path: str,
unk_token: str = '<unk>',
bos_token: str = '<s>',
eos_token: str = '</s>',
pad_token: str = '<pad>',
sep_token: str = '<sep>'):
self.unk_token = unk_token
self.bos_token = bos_token
self.eos_token = eos_token
self.pad_token = pad_token
self.sep_token = sep_token
with open(vocab_path, 'r', encoding='utf-8') as fp:
self.additional_tokens = [bos_token, eos_token, pad_token]
# The additional tokens would be inserted before the words.
self.words = self.additional_tokens + fp.read().split() + [sep_token]
self.vocab = {word: i for i, word in enumerate(self.words)}
def __getitem__(self, idx_or_token: Union[int, str]) -> Union[str, int]:
if isinstance(idx_or_token, str):
return self.vocab[idx_or_token]
else:
return self.words[idx_or_token]
def __contains__(self, token: str) -> bool:
return token in self.words
def __len__(self) -> int:
# Note that the reported vocabulary size is rounded up to a multiple of 8,
# even though the actual number of words may be smaller.
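# e.g. 1003 actual words are reported as (1003 + 7) // 8 * 8 == 1008.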
return (len(self.words) + 7) // 8 * 8
@property
def unk_idx(self) -> int:
return self.vocab[self.unk_token]
@property
def bos_idx(self) -> int:
return self.vocab[self.bos_token]
@property
def eos_idx(self) -> int:
return self.vocab[self.eos_token]
@property
def pad_idx(self) -> int:
return self.vocab[self.pad_token]
@property
def sep_idx(self) -> int:
return self.vocab[self.sep_token]
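# Example usage (a sketch; 'vocab.txt' is a hypothetical whitespace-separated word list):
#   vocab = Vocab('vocab.txt')
#   vocab['<s>'] == vocab.bos_idx == 0
#   vocab[vocab.pad_idx] == '<pad>'
# Note that '<unk>' is resolvable only if the vocabulary file itself contains it.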
|
"""
Cellular provisioning classes
"""
import hashlib
import binascii
from time import sleep
import boto3
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from pyawsutils.mar import aws_mar
from pyawsutils.aws_cloudformation import MCHP_SANDBOX_ATS_ENDPOINT
from pyawsutils.aws_ca_cert import aws_get_root_ca_cert_filename
from pytrustplatform.ecc_cert_builder import build_certs_from_ecc
from pytrustplatform.device_cert_builder import build_device_cert
from ..provisioner import Provisioner, ProvisionerAwsMar, ProvisionerAwsJitr, ProvisionerError
from ..config import Config
from ..kit_config import kit_configure_disk_link
from ..eccstorage import EccStorage
from .atprovisioner import AtProvisioner
from .sequans_ciphersuites import print_ciphersuites, validate_ciphersuites, DEFAULT_CIPHERSUITES
DEFAULT_CELLULAR_PROVIDER = "standard"
# list of valid frequency band values.
CELLULAR_VALID_FREQ_BANDS = [1, 2, 3, 4, 5, 8, 12, 13, 14, 17, 18, 19, 20, 25, 26, 28, 66, 71, 85]
def get_cellular_provisioner(programmer, args):
"""
Resolves the cellular provisioning algorithm requested by the user
"""
if args.cloud_provider == "google":
# Only one option for Google
return CellularProvisionerGoogle(programmer, args.skip_program_provision_firmware, args.port)
if args.cloud_provider == "aws":
# AWS can be done many ways:
if args.provision_method == "mar":
# Multi-Account registration (to user account)
return CellularProvisionerAwsMar(programmer, args.skip_program_provision_firmware, args.port)
if args.provision_method == "jitr":
# Just-In-Time Registration (to user account)
return CellularProvisionerAwsJitr(programmer, args.skip_program_provision_firmware, args.port)
# Microchip sandbox
return CellularProvisionerAws(programmer, args.skip_program_provision_firmware, args.port)
if args.cloud_provider == "azure":
# Azure (preliminary)
return CellularProvisionerAzure(programmer, args.skip_program_provision_firmware, args.port)
if args.cloud_provider is None:
# This choice is valid for debuggerupgrade action
return CellularProvisioner(programmer, args.skip_program_provision_firmware, args.port)
raise ProvisionerError("Unable find Cellular provisioner for {} - {}".format(args.cloud_provider,
args.provision_method))
class CellularProvisioner(Provisioner):
"""
This class implements provisioning for the AVR-IoT Cellular kit. Its subclasses mirror the structure of the
Provisioner class hierarchy.
"""
DEVICE_CERT_SLOT = 0
DEVICE_PRIVATEKEY_SLOT = 0
ROOT_CERT_SLOT = 19
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
super().__init__(programmer, skip_program_provision_fw, port=port)
self.provider = DEFAULT_CELLULAR_PROVIDER
self.frequency_bands = None # None means don't configure frequency bands for provider.
self.aws_profile = None
self.ciphersuites = None
self.client_key_storage = 1 # Use ECC private key
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.disconnect()
def set_cellular_params(self, args):
"""
Set up Cellular-specific parameters that cannot be passed in the constructor due to protocol
:param args: Parsed-out command-line arguments
"""
self.provider = None # => Don't change provider
self.frequency_bands = None # => Don't change bands. FIXME: move to pysequans?
self.ciphersuites = validate_ciphersuites(DEFAULT_CIPHERSUITES.get(args.cloud_provider, []))
self.aws_profile = args.aws_profile
def connect(self, function, skip_programming=False):
"""
Implement additional steps to synchronize with Sequans modem reset after initial FW programming/reset
:param function: Firmware function (eg. "iotprovision") as defined
:param skip_programming: Skip programming FW.
"""
super().connect(function, skip_programming)
# At this point we should be freshly out of reset and in sync with the firmware, we now should synchronize
# with Sequans modem, eg by means of waiting for +SYSSTART URC in bridge mode.
# FIXME: However could not make that work, so just do a mystery delay for now.
# The minimum delay required is surprisingly consistent 0.9-1.0 seconds
sleep(1.2)
def do_provision(self, force_new_device_certificate=False, skip_program_provision_firmware=False):
"""
Common part of Cellular provisioning, independent of cloud provider and method. Subclasses should
override this and append their specific parts of provisioning.
:param force_new_device_certificate: Force creation of device certificates
:param skip_program_provision_firmware: Skip programming provisioning FW. Compatible FW
must be programmed previously; this is the user's responsibility
"""
self.connect("iotprovision", skip_programming=skip_program_provision_firmware)
# Set up basic connection parameters in modem, common for all cloud providers
self.logger.info("Setting up modem")
with AtProvisioner(self.fwinterface) as atprovisioner:
atprovisioner.set_provider(self.provider)
if self.frequency_bands:
atprovisioner.set_frequency_bands(self.provider, self.frequency_bands)
@staticmethod
def validate_int_list(values, valids, base=10):
"""
Validate list of integer values and convert to integer list.
It is assumed all integers in list are in same base.
:param values: String with comma-separated integers
:param valids: List of valid integer values
:param base: Base expected in input
:return: List of strings representing the values in the selected base. The modem
expects the list as strings in a specific base depending on the command,
and all values must be in the same base (decimal, hex, ...)
:raise: ValueError if invalid values specified
"""
if values is None:
return None
if not values:
return ""
valid = []
invalid = []
for value in values.split(","):
try:
if int(value, base=base) in valids:
valid.append(value)
else:
invalid.append(value)
except ValueError:
invalid.append(value)
if invalid:
raise ValueError(f"Invalid value(s): {','.join(invalid)}")
return valid
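# Worked example (a sketch):
#   CellularProvisioner.validate_int_list("1,2,20", CELLULAR_VALID_FREQ_BANDS)
# returns ['1', '2', '20'], while "1,99" raises ValueError("Invalid value(s): 99").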
@staticmethod
def create_cert_chain(certfiles, outfile=None):
"""
Create a certificate chain, basically a concatenation of PEM files.
:param certfiles: List of input certificate file names in PEM format.
:param outfile: Optional output file name for saving chain
:return: Certificate chain
"""
chain = b""
for file in certfiles:
with open(file, "rb") as f:
chain += f.read()
# Make sure cert ends with a newline
if not chain.endswith(b"\n"):
s += b"\n"
if outfile:
with open(outfile, "w") as f:
f.write(chain)
return chain
class CellularProvisionerAzure(CellularProvisioner):
"""
Azure provisioning mechanisms for Cellular
"""
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
super().__init__(programmer, skip_program_provision_fw, port=port)
self.cloud_provider = "azure"
raise NotImplementedError("'{}' not yet implemented".format(type(self).__name__))
class CellularProvisionerGoogle(CellularProvisioner):
"""
Google provisioning mechanism for Cellular
"""
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
super().__init__(programmer, skip_program_provision_fw, port=port)
self.cloud_provider = "google"
raise NotImplementedError("'{}' not yet implemented".format(type(self).__name__))
class CellularProvisionerAws(CellularProvisioner):
"""
AWS Microchip Sandbox provisioning mechanism for Cellular
"""
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
super().__init__(programmer, skip_program_provision_fw, port=port)
self.cloud_provider = "aws"
#pylint: disable=unused-argument
def generate_certificates(self, force, organization_name, root_common_name, signer_common_name):
"""
Generate CA certificates
Nothing to do for AWS Sandbox
"""
return
def create_device_certs_ecc(self, device_cert_file, signer_cert_file, force=False):
"""
Create device and signer certificate from ECC, if not already existing
:param device_cert_file: Device certificate filename
:param signer_cert_file: Signer certificate filename
:param force: Force regeneration of the certificates even if they already exist
:return: Thing name extracted from certificate
"""
self.logger.info("Generating device certificates")
device_cert, _ = build_certs_from_ecc(self.fwinterface.get_firmware_driver(),
signer_cert_file, device_cert_file,
force=force)
try:
# FIXME: Why is this thing name extraction different from custom provisioning?
ski = device_cert.extensions.get_extension_for_oid(
x509.oid.ExtensionOID.SUBJECT_KEY_IDENTIFIER).value.digest
thing_name = binascii.b2a_hex(ski).decode()
except x509.ExtensionNotFound:
pubkey = device_cert.public_key().public_bytes(encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
thing_name = hashlib.sha1(pubkey[-65:]).hexdigest()
return thing_name
def store_provisioning_data(self, thingname, endpoint, device_cert_file, root_ca_cert_file):
"""
Save provisioning data to kit.
:param thingname: AWS thing name
:param endpoint: AWS endpoint
:param device_cert_file: Device certificate file, PEM format
:param root_ca_cert_file: Root CA certificate file, PEM format
"""
aws_thing_file = Config.Certs.get_path("aws_thing_file", self.serialnumber)
self.store_iot_id(thingname, aws_thing_file)
self.logger.info("Writing to ECC slot 8:\n"
" Thing name: %s\n"
" Endpoint: %s", thingname, endpoint)
self.fwinterface.eccstorage.create_write_provinfo([
(EccStorage.AWS_THINGNAME, thingname),
(EccStorage.AWS_ENDPOINT, endpoint)])
# FIXME: Read back and verify. Remove this when we trust EccStorage entirely
self.logger.debug("Verify correct ECC write")
data = self.fwinterface.eccstorage.read_provinfo()
assert len(data) == 2
assert data[0] == (EccStorage.AWS_THINGNAME, thingname.encode())
assert data[1] == (EccStorage.AWS_ENDPOINT, endpoint.encode())
with open(root_ca_cert_file, "rb") as f:
root_ca_cert = f.read()
with open(device_cert_file, "rb") as f:
device_cert = f.read()
self.logger.info("Writing certificates to modem")
with AtProvisioner(self.fwinterface) as atprovisioner:
atprovisioner.write_slot("certificate", device_cert, self.DEVICE_CERT_SLOT)
atprovisioner.write_slot("certificate", root_ca_cert, self.ROOT_CERT_SLOT)
atprovisioner.set_security_profile(server_ca=self.ROOT_CERT_SLOT,
client_cert=self.DEVICE_CERT_SLOT,
ciphersuites=self.ciphersuites,
client_key_storage=self.client_key_storage)
def do_provision(self, force_new_device_certificate=False, skip_program_provision_firmware=False):
"""
Sandbox provisioning for AWS
"""
super().do_provision(force_new_device_certificate, skip_program_provision_firmware)
device_cert_file = Config.Certs.get_path("device_cert_file_sandbox", self.serialnumber)
signer_cert_file = Config.Certs.get_path("signer_cert_file_sandbox", self.serialnumber)
thingname = self.create_device_certs_ecc(device_cert_file, signer_cert_file, force=force_new_device_certificate)
self.store_provisioning_data(thingname, MCHP_SANDBOX_ATS_ENDPOINT, device_cert_file,
aws_get_root_ca_cert_filename("aws_ca_bundle"))
self.disconnect()
# Change the disk link after provisioning
kit_configure_disk_link(serialnumber=self.serialnumber,
cloud_provider=self.cloud_provider,
key2_value=thingname)
self.debugger_reboot_required = True
def get_aws_endpoint(self):
"""
Get AWS endpoint when custom provisioning is used
"""
# FIXME: This should be in pyawsutils!
aws_session = boto3.session.Session(profile_name=self.aws_profile)
aws_iot = aws_session.client("iot")
return aws_iot.describe_endpoint(endpointType="iot:Data-ATS").get("endpointAddress")
class CellularProvisionerAwsMar(ProvisionerAwsMar, CellularProvisionerAws):
"""
AWS MAR provisioning mechanism for Cellular
"""
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
CellularProvisionerAws.__init__(self, programmer, skip_program_provision_fw, port=port)
def do_provision(self, force_new_device_certificate=False, skip_program_provision_firmware=False):
"""
Provisioning for AWS MAR
"""
        #FIXME: This is almost the same as sandbox provisioning; consider a common method with
        # parameters (endpoint, disk link) if things don't change.
CellularProvisioner.do_provision(self, force_new_device_certificate, skip_program_provision_firmware)
device_cert_file = Config.Certs.get_path("device_cert_file", self.serialnumber)
signer_cert_file = Config.Certs.get_path("signer_cert_file", self.serialnumber)
thingname = self.create_device_certs_ecc(device_cert_file, signer_cert_file, force=force_new_device_certificate)
# Register device certificate without CA for custom provisioning with MAR
aws_mar_tool = aws_mar(aws_profile=self.aws_profile_name)
aws_mar_tool.create_device(certificate_file=device_cert_file,
policy_name="zt_policy", thing_type=None)
self.store_provisioning_data(thingname, self.get_aws_endpoint(), device_cert_file,
aws_get_root_ca_cert_filename("aws_ca_bundle"))
self.disconnect()
# Change the disk link after reprovisioning
# Note: disk link will not lead to data in the user's custom account.
kit_configure_disk_link(serialnumber=self.serialnumber,
cloud_provider='awscustom',
key2_value=thingname)
self.debugger_reboot_required = True
class CellularProvisionerAwsJitr(ProvisionerAwsJitr, CellularProvisionerAws):
"""
AWS JITR provisioning mechanism for Cellular
"""
def __init__(self, programmer, skip_program_provision_fw=False, port=None):
CellularProvisionerAws.__init__(self, programmer, skip_program_provision_fw, port=port)
def do_provision(self, force_new_device_certificate=False, skip_program_provision_firmware=False):
"""
Provisioning for AWS JITR
"""
CellularProvisioner.do_provision(self, force_new_device_certificate, skip_program_provision_firmware)
self.logger.info("Generating device certificate")
device_cert = build_device_cert(self.fwinterface.get_firmware_driver(),
Config.Certs.get_path("signer_ca_cert_file"),
Config.Certs.get_path("signer_ca_key_file"),
Config.Certs.get_path("device_csr_file", self.serialnumber),
Config.Certs.get_path("device_cert_file", self.serialnumber),
force=force_new_device_certificate)
thingname = None
for extension in device_cert.extensions:
if extension.oid._name != 'subjectKeyIdentifier':
continue # Not the extension we're looking for, skip
thingname = binascii.b2a_hex(extension.value.digest).decode('ascii')
self.store_provisioning_data(thingname, self.get_aws_endpoint(),
Config.Certs.get_path("device_cert_file", self.serialnumber),
aws_get_root_ca_cert_filename("aws_ca_bundle"))
self.disconnect()
# Change the disk link after reprovisioning
# Note: disk link will not lead to data in the user's custom account.
kit_configure_disk_link(serialnumber=self.serialnumber,
cloud_provider='awscustom',
key2_value=thingname)
self.debugger_reboot_required = True
|
# Inspired by https://github.com/pytorch/vision/blob/6e10e3f88158f12b7a304d3c2f803d2bbdde0823/torchvision/ops/boxes.py#L136
import numpy as np
import torch
from deeptech.model.layers.roi_ops import BoxToRoi
convert_to_corners = BoxToRoi()
def similarity_iou_2d(pred_boxes, true_boxes):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (cx, cy, w, h) format.
Arguments:
pred_boxes (Tensor[B, 4, N])
true_boxes (Tensor[B, 4, M])
    Returns:
        iou (Tensor[B, N, M]): the batch of NxM matrices containing the pairwise IoU values between pred_boxes and true_boxes
"""
def area(boxes):
return (boxes[:, :, 2] - boxes[:, :, 0]) * (boxes[:, :, 3] - boxes[:, :, 1])
pred_boxes = convert_to_corners(pred_boxes).transpose(1, 2) # BN4
    true_boxes = convert_to_corners(true_boxes).transpose(1, 2) # BM4
area1 = area(pred_boxes) # BN
area2 = area(true_boxes) # BM
lt = torch.max(pred_boxes[:,:, None, :2], true_boxes[:,:, :2]) # BNM2
rb = torch.min(pred_boxes[:,:, None, 2:], true_boxes[:,:, 2:]) # BNM2
wh = (rb - lt).clamp(min=0) # BNM2
inter = wh[:, :, :, 0] * wh[:, :, :, 1] # BNM
iou = inter / (area1[:, :, None] + area2 - inter) # BNM
return iou
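# Minimal usage sketch (not part of the original module): illustrates the
# expected tensor shapes with random values, assuming `torch` and `deeptech`
# are installed as the imports above require.
if __name__ == "__main__":
    pred = torch.rand(2, 4, 10)  # batch of 2, 10 predicted boxes as (cx, cy, w, h)
    true = torch.rand(2, 4, 5)   # batch of 2, 5 ground-truth boxes as (cx, cy, w, h)
    print(similarity_iou_2d(pred, true).shape)  # torch.Size([2, 10, 5])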
|
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Boolean
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, autoincrement=True)
arroba = Column(String(150), nullable=True)
tgid = Column(Integer, nullable=False)
account_id = Column(Integer, nullable=True)
US_id = Column(Integer, nullable=True)
EU_id = Column(Integer, nullable=True)
KR_id = Column(Integer, nullable=True)
TW_id = Column(Integer, nullable=True)
display_name = Column(String(50), nullable=True)
battle_tag = Column(String(50), nullable=True)
created_at = Column(DateTime, nullable=False)
modified_at = Column(DateTime, nullable=False)
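# Minimal usage sketch (not part of the original module): creates the table in
# an in-memory SQLite database and inserts one row. The connection URL and the
# field values are placeholders.
if __name__ == "__main__":
    from datetime import datetime
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    now = datetime.utcnow()
    session.add(User(tgid=12345, display_name="example", created_at=now, modified_at=now))
    session.commit()
    print(session.query(User).count())  # 1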
|
#!/usr/bin/env python
import argparse
import os.path
import shutil
import sys
import urllib2
FL_URL='http://rdadolf.com/hosted/fathom-lite/data'
VERSION='1.0'
DATA_FILES=[
'mnist-inputs.npz',
'mnist-labels.npz',
'imagenet-inputs.npz',
'imagenet-labels.npz',
'babi-stories.npz',
'babi-questions.npz',
'babi-answers.npz',
]
class NetworkError(Exception): pass
class MissingDataError(Exception): pass
def check_host_has_data(url):
file_url = url+'/version'
response = urllib2.urlopen(file_url)
version_string = response.read().strip()
if version_string!=VERSION:
raise MissingDataError('Server alive but does not appear to have up-to-date Fathom-lite data.')
return True
def download_if_not_cached(file_url, file_dest, force=False):
if os.path.isfile(file_dest) and not force:
print 'File cache found: '+file_dest
return False # File cached. (To force download, just remove the file.)
dir = os.path.dirname(os.path.abspath(file_dest))
if not os.path.isdir(dir):
raise IOError('Destination directory "'+str(dir)+'" does not exist')
try:
print 'Downloading '+file_url
response = urllib2.urlopen(file_url)
with open(file_dest,'wb') as f:
shutil.copyfileobj(response,f)
except urllib2.HTTPError:
print 'Error when downloading '+file_url
raise
return file_dest
def get_options():
cli = argparse.ArgumentParser('Download Fathom-lite data files from the Internet.')
cli.add_argument('-f','--force',
default=False, action='store_true',
help='Ignore file caches')
cli.add_argument('-n','--no-version',
default=False, action='store_true',
help='Do not check server for data version number')
cli.add_argument('-d','--dir',
default='.', type=str,
help='Download data to alternate directory.')
return cli.parse_args()
if __name__=='__main__':
opts = get_options()
  if not opts.no_version:
    status = check_host_has_data(FL_URL)
for filename in DATA_FILES:
file_url = FL_URL+'/'+filename
file_dest = os.path.normpath(opts.dir+'/'+filename)
download_if_not_cached(file_url, file_dest, force=opts.force)
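# Example invocations (illustrative; the script name depends on where this file
# is saved):
#   python2 <this file>               # download into the current directory
#   python2 <this file> -f -d ./data  # ignore caches and download into ./data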
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import division
import copy
import psychopy
from .text import TextStim
from psychopy.data.utils import importConditions, listFromString
from psychopy.visual.basevisual import (BaseVisualStim,
ContainerMixin,
ColorMixin)
from psychopy import logging
from random import shuffle
from pathlib import Path
from psychopy.constants import PY3
__author__ = 'Jon Peirce, David Bridges, Anthony Haffey'
from ..colors import Color
_REQUIRED = -12349872349873 # an unlikely int
# a dict of known fields with their default vals
_knownFields = {
'index': None, # optional field to index into the rows
'itemText': _REQUIRED, # (question used until 2020.2)
'itemColor': None,
'itemWidth': 0.8, # fraction of the form
'type': _REQUIRED, # type of response box (see below)
'options': ('Yes', 'No'), # for choice box
'ticks': None,#(1, 2, 3, 4, 5, 6, 7),
'tickLabels': None,
'font': None,
# for rating/slider
'responseWidth': 0.8, # fraction of the form
'responseColor': None,
'markerColor': None,
'layout': 'horiz', # can be vert or horiz
}
_doNotSave = [
    'itemCtrl', 'responseCtrl',  # these genuinely can't be saved
'itemColor', 'itemWidth', 'options', 'ticks', 'tickLabels', # not useful?
'responseWidth', 'responseColor', 'layout',
]
_knownRespTypes = {
'heading', 'description', # no responses
'rating', 'slider', # slider is continuous
'free text',
'choice', 'radio' # synonyms (radio was used until v2020.2)
}
_synonyms = {
'itemText': 'questionText',
'choice': 'radio',
'free text': 'textBox'
}
class Form(BaseVisualStim, ContainerMixin, ColorMixin):
"""A class to add Forms to a `psychopy.visual.Window`
The Form allows Psychopy to be used as a questionnaire tool, where
participants can be presented with a series of questions requiring responses.
Form items, defined as questions and response pairs, are presented
simultaneously onscreen with a scrollable viewing window.
Example
-------
survey = Form(win, items=[{}], size=(1.0, 0.7), pos=(0.0, 0.0))
Parameters
----------
win : psychopy.visual.Window
The window object to present the form.
items : List of dicts or csv or xlsx file
a list of dicts or csv file should have the following key, value pairs / column headers:
"index": The item index as a number
"itemText": item question string,
"itemWidth": fraction of the form width 0:1
"type": type of rating e.g., 'radio', 'rating', 'slider'
"responseWidth": fraction of the form width 0:1,
"options": list of tick labels for options,
"layout": Response object layout e.g., 'horiz' or 'vert'
textHeight : float
Text height.
size : tuple, list
Size of form on screen.
pos : tuple, list
Position of form on screen.
itemPadding : float
Space or padding between form items.
units : str
units for stimuli - Currently, Form class only operates with 'height' units.
randomize : bool
Randomize order of Form elements
"""
knownStyles = {
'light': {
'fillColor': [0.89, 0.89, 0.89],
'borderColor': None,
'itemColor': 'black',
'responseColor': 'black',
'markerColor': [0.89, -0.35, -0.28],
'font': "Open Sans",
},
'dark': {
'fillColor': [-0.19, -0.19, -0.14],
'borderColor': None,
'itemColor': 'white',
'responseColor': 'white',
'markerColor': [0.89, -0.35, -0.28],
'font': "Open Sans",
},
}
def __init__(self,
win,
name='default',
colorSpace='rgb',
fillColor=None,
borderColor=None,
itemColor='white',
responseColor='white',
markerColor='red',
items=None,
font=None,
textHeight=.02,
size=(.5, .5),
pos=(0, 0),
style=None,
itemPadding=0.05,
units='height',
randomize=False,
autoLog=True,
# legacy
color=None,
foreColor=None
):
super(Form, self).__init__(win, units, autoLog=False)
self.win = win
self.autoLog = autoLog
self.name = name
self.randomize = randomize
self.items = self.importItems(items)
self.size = size
self._pos = pos
self.itemPadding = itemPadding
self.scrollSpeed = self.setScrollSpeed(self.items, 4)
self.units = units
self.depth = 0
# Appearance
self.colorSpace = colorSpace
self.fillColor = fillColor
self.borderColor = borderColor
self.itemColor = itemColor
self.responseColor = responseColor
self.markerColor = markerColor
        if color:
            self.foreColor = color
        if foreColor:
            self.foreColor = foreColor
self.style = style
self.font = font or "Open Sans"
self.textHeight = textHeight
self._scrollBarSize = (0.016, self.size[1]/1.2)
self._baseYpositions = []
self.leftEdge = None
self.rightEdge = None
self.topEdge = None
self._currentVirtualY = 0 # Y position in the virtual sheet
self._decorations = []
self._externalDecorations = []
# Check units - only works with height units for now
if self.win.units != 'height':
logging.warning(
"Form currently only formats correctly using height units. "
"Please change the units in Experiment Settings to 'height'")
self._complete = False
# Create layout of form
self._createItemCtrls()
if self.autoLog:
logging.exp("Created {} = {}".format(self.name, repr(self)))
def __repr__(self, complete=False):
return self.__str__(complete=complete) # from MinimalStim
def importItems(self, items):
"""Import items from csv or excel sheet and convert to list of dicts.
Will also accept a list of dicts.
Note, for csv and excel files, 'options' must contain comma separated values,
e.g., one, two, three. No parenthesis, or quotation marks required.
Parameters
----------
items : Excel or CSV file, list of dicts
Items used to populate the Form
Returns
-------
List of dicts
A list of dicts, where each list entry is a dict containing all fields for a single Form item
"""
def _checkSynonyms(items, fieldNames):
"""Checks for updated names for fields (i.e. synonyms)"""
replacedFields = set()
for field in _synonyms:
synonym = _synonyms[field]
for item in items:
if synonym in item:
# convert to new name
item[field] = item[synonym]
del item[synonym]
replacedFields.add(field)
for field in replacedFields:
fieldNames.append(field)
fieldNames.remove(_synonyms[field])
logging.warning("Form {} included field no longer used {}. "
"Replacing with new name '{}'"
.format(self.name, _synonyms[field], field))
def _checkRequiredFields(fieldNames):
"""Checks for required headings (do this after checking synonyms)"""
for hdr in _knownFields:
# is it required and/or present?
if _knownFields[hdr] == _REQUIRED and hdr not in fieldNames:
raise ValueError("Missing header ({}) in Form ({}). "
"Headers found were: {}"
.format(hdr, self.name, fieldNames))
def _checkTypes(types, itemText):
"""A nested function for testing the number of options given
Raises ValueError if n Options not > 1
"""
itemDiff = set([types]) - set(_knownRespTypes)
for incorrItemType in itemDiff:
if incorrItemType == _REQUIRED:
if self._itemsFile:
itemsFileStr = ("in items file '{}'"
.format(self._itemsFile))
else:
itemsFileStr = ""
msg = ("Item {}{} is missing a required "
"value for its response type. Permitted types are "
"{}.".format(itemText, itemsFileStr,
_knownRespTypes))
if self.autoLog:
logging.error(msg)
raise ValueError(msg)
def _addDefaultItems(items):
"""
            Adds default values for any missing fields. Works in-place.
            Parameters
            ----------
            items : List of dicts
                The items to populate with default values
"""
def isPresent(d, field):
# check if the field is there and not empty on this row
return (field in d and d[field] not in [None, ''])
missingHeaders = []
defaultValues = _knownFields
for index, item in enumerate(items):
defaultValues['index'] = index
for header in defaultValues:
                    # if header is missing or val is None or ''
if not isPresent(item, header):
oldHeader = header.replace('item', 'question')
if isPresent(item, oldHeader):
item[header] = item[oldHeader]
logging.warning(
"{} is a deprecated heading for Forms. "
"Use {} instead"
.format(oldHeader, header)
)
continue
# Default to colour scheme if specified
if defaultValues[header] in ['fg', 'bg', 'em']:
item[header] = self.color
else:
item[header] = defaultValues[header]
missingHeaders.append(header)
msg = "Using default values for the following headers: {}".format(
missingHeaders)
if self.autoLog:
logging.info(msg)
if self.autoLog:
logging.info("Importing items...")
if not isinstance(items, list):
# items is a conditions file
self._itemsFile = Path(items)
items, fieldNames = importConditions(items, returnFieldNames=True)
else: # we already have a list so lets find the fieldnames
fieldNames = set()
for item in items:
fieldNames = fieldNames.union(item)
fieldNames = list(fieldNames) # convert to list at the end
self._itemsFile = None
_checkSynonyms(items, fieldNames)
_checkRequiredFields(fieldNames)
# Add default values if entries missing
_addDefaultItems(items)
# Convert options to list of strings
for idx, item in enumerate(items):
if item['ticks']:
item['ticks'] = listFromString(item['ticks'])
if 'tickLabels' in item and item['tickLabels']:
item['tickLabels'] = listFromString(item['tickLabels'])
if 'options' in item and item['options']:
item['options'] = listFromString(item['options'])
# Check types
[_checkTypes(item['type'], item['itemText']) for item in items]
# Check N options > 1
# Randomise items if requested
if self.randomize:
shuffle(items)
return items
def setScrollSpeed(self, items, multiplier=2):
"""Set scroll speed of Form. Higher multiplier gives smoother, but
slower scroll.
Parameters
----------
items : list of dicts
Items used to populate the form
multiplier : int (default=2)
Number used to calculate scroll speed
Returns
-------
int
Scroll speed, calculated using N items by multiplier
"""
return len(items) * multiplier
def _getItemRenderedWidth(self, size):
"""Returns text width for item text based on itemWidth and Form width.
Parameters
----------
size : float, int
The question width
Returns
-------
float
Wrap width for question text
"""
return size * self.size[0] - (self.itemPadding * 2)
def _setQuestion(self, item):
"""Creates TextStim object containing question
Parameters
----------
item : dict
The dict entry for a single item
Returns
-------
psychopy.visual.text.TextStim
The textstim object with the question string
questionHeight
The height of the question bounding box as type float
questionWidth
The width of the question bounding box as type float
"""
if self.autoLog:
logging.exp(
u"Question text: {}".format(item['itemText']))
if item['type'] == 'heading':
letterScale = 1.5
bold = True
else:
letterScale = 1.0
bold = False
w = self._getItemRenderedWidth(item['itemWidth'])
question = psychopy.visual.TextBox2(
self.win,
text=item['itemText'],
units=self.units,
letterHeight=self.textHeight * letterScale,
anchor='top-left',
pos=(self.leftEdge+self.itemPadding, 0), # y pos irrelevant
size=[w, None], # expand height with text
autoLog=False,
colorSpace=self.colorSpace,
color=item['itemColor'] or self.itemColor,
fillColor=None,
padding=0, # handle this by padding between items
borderWidth=1,
borderColor=None, # add borderColor to help debug
editable=False,
bold=bold,
font=item['font'] or self.font)
questionHeight = question.size[1]
questionWidth = question.size[0]
# store virtual pos to combine with scroll bar for actual pos
question._baseY = self._currentVirtualY
# Add question objects to Form element dict
item['itemCtrl'] = question
return question, questionHeight, questionWidth
def _setResponse(self, item):
"""Makes calls to methods which make Slider or TextBox response objects
for Form
Parameters
----------
item : dict
The dict entry for a single item
question : TextStim
The question text object
Returns
-------
psychopy.visual.slider.Slider
The Slider object for response
psychopy.visual.TextBox
The TextBox object for response
respHeight
The height of the response object as type float
"""
if self.autoLog:
logging.info(
"Adding response to Form type: {}, layout: {}, options: {}"
.format(item['type'], item['layout'], item['options']))
if item['type'].lower() == 'free text':
respCtrl, respHeight = self._makeTextBox(item)
elif item['type'].lower() in ['heading', 'description']:
respCtrl, respHeight = None, 0
elif item['type'].lower() in ['rating', 'slider', 'choice', 'radio']:
respCtrl, respHeight = self._makeSlider(item)
item['responseCtrl'] = respCtrl
return respCtrl, float(respHeight)
def _makeSlider(self, item):
"""Creates Slider object for Form class
Parameters
----------
item : dict
The dict entry for a single item
pos : tuple
position of response object
Returns
-------
psychopy.visual.slider.Slider
The Slider object for response
respHeight
The height of the response object as type float
"""
# Slider dict
def _sliderLabelWidths():
return (item['responseWidth'] * self.size[0]) \
/ (len(item['options']))
kind = item['type'].lower()
# what are the ticks for the scale/slider?
if item['type'].lower() in ['radio', 'choice']:
if item['ticks']:
ticks = item['ticks']
else:
ticks = None
tickLabels = item['tickLabels'] or item['options'] or item['ticks']
granularity = 1
style = 'radio'
else:
if item['ticks']:
ticks = item['ticks']
elif item['options']:
ticks = range(0, len(item['options']))
else:
raise ValueError("We don't appear to have either options or "
"ticks for item '{}' of {}."
.format(item['itemText'], self.name))
# how to label those ticks
if item['tickLabels']:
tickLabels = [str(i).strip() for i in item['tickLabels']]
elif 'options' in item and item['options']:
tickLabels = [str(i).strip() for i in item['options']]
else:
tickLabels = None
# style/granularity
if kind == 'slider' and 'granularity' in item:
if item['granularity']:
granularity = item['granularity']
else:
granularity = 0
elif kind == 'slider' and 'granularity' not in item:
granularity = 0
else:
granularity = 1
style = kind
# Create x position of response object
x = self.pos[0]
# Set radio button layout
if item['layout'] == 'horiz':
w = (item['responseWidth'] * self.size[0]
- self._scrollBarSize[0] - self.itemPadding) * 0.8
h = 0.03
elif item['layout'] == 'vert':
# for vertical take into account the nOptions
w = 0.03
h = self.textHeight*len(item['options'])
item['options'].reverse()
# Create Slider
resp = psychopy.visual.Slider(
self.win,
pos=(x, 0), # NB y pos is irrelevant here - handled later
size=(w, h),
ticks=ticks,
labels=tickLabels,
units=self.units,
labelHeight=self.textHeight,
labelWrapWidth=_sliderLabelWidths(),
granularity=granularity,
flip=True,
style=style,
autoLog=False,
font=item['font'] or self.font,
color=item['responseColor'] or self.responseColor,
fillColor=item['markerColor'] or self.markerColor,
borderColor=item['responseColor'] or self.responseColor,
colorSpace=self.colorSpace)
if item['layout'] == 'horiz':
h += self.textHeight*2
# store virtual pos to combine with scroll bar for actual pos
resp._baseY = self._currentVirtualY - h/2 - self.itemPadding
return resp, h
def _getItemHeight(self, item, ctrl=None):
"""Returns the full height of the item to be inserted in the form"""
if type(ctrl) == psychopy.visual.TextBox2:
return ctrl.size[1]
if type(ctrl) == psychopy.visual.Slider:
# Set radio button layout
if item['layout'] == 'horiz':
return 0.03 + ctrl.labelHeight*3
elif item['layout'] == 'vert':
# for vertical take into account the nOptions
return ctrl.labelHeight*len(item['options'])
def _makeTextBox(self, item):
"""Creates TextBox object for Form class
        NOTE: TextBox2 is still a work in progress.
Parameters
----------
item : dict
The dict entry for a single item
pos : tuple
position of response object
Returns
-------
psychopy.visual.TextBox
The TextBox object for response
respHeight
The height of the response object as type float
"""
w = (item['responseWidth']*self.size[0]
- self.itemPadding - self._scrollBarSize[0])
x = self.rightEdge-self.itemPadding-self._scrollBarSize[0]
resp = psychopy.visual.TextBox2(
self.win,
text='',
pos=(x, 0), # y pos irrelevant now (handled by scrollbar)
size=(w, None),
letterHeight=self.textHeight,
units=self.units,
anchor='top-right',
color=item['responseColor'] or self.responseColor,
colorSpace=self.colorSpace,
font=item['font'] or self.font,
editable=True,
borderColor=item['responseColor'] or self.responseColor,
borderWidth=2,
fillColor=None,
onTextCallback=self._layoutY,
)
respHeight = resp.size[1]
# store virtual pos to combine with scroll bar for actual pos
resp._baseY = self._currentVirtualY
return resp, respHeight
def _setScrollBar(self):
"""Creates Slider object for scrollbar
Returns
-------
psychopy.visual.slider.Slider
The Slider object for scroll bar
"""
scroll = psychopy.visual.Slider(win=self.win,
size=self._scrollBarSize,
ticks=[0, 1],
style='scrollbar',
borderColor=self.responseColor,
fillColor=self.markerColor,
pos=(self.rightEdge - .008, self.pos[1]),
autoLog=False)
return scroll
def _setBorder(self):
"""Creates border using Rect
Returns
-------
psychopy.visual.Rect
The border for the survey
"""
return psychopy.visual.Rect(win=self.win,
units=self.units,
pos=self.pos,
width=self.size[0],
height=self.size[1],
colorSpace=self.colorSpace,
fillColor=self.fillColor,
lineColor=self.borderColor,
opacity=None,
autoLog=False)
def _setAperture(self):
"""Blocks text beyond border using Aperture
Returns
-------
psychopy.visual.Aperture
The aperture setting viewable area for forms
"""
aperture = psychopy.visual.Aperture(win=self.win,
name='aperture',
units=self.units,
shape='square',
size=self.size,
pos=self.pos,
autoLog=False)
aperture.disable() # Disable on creation. Only enable on draw.
return aperture
def _getScrollOffset(self):
"""Calculate offset position of items in relation to markerPos
Returns
-------
float
Offset position of items proportionate to scroll bar
"""
sizeOffset = (1-self.scrollbar.markerPos) * self.size[1]
maxItemPos = self._currentVirtualY - self.size[1]
if maxItemPos > -self.size[1]:
return 0
return maxItemPos*(1- self.scrollbar.markerPos) + sizeOffset
def _createItemCtrls(self):
"""Define layout of form"""
# Define boundaries of form
if self.autoLog:
logging.info("Setting layout of Form: {}.".format(self.name))
self.leftEdge = self.pos[0] - self.size[0] / 2.0
self.rightEdge = self.pos[0] + self.size[0] / 2.0
# For each question, create textstim and rating scale
for item in self.items:
# set up the question object
self._setQuestion(item)
# set up the response object
self._setResponse(item)
# position a slider on right-hand edge
self.scrollbar = self._setScrollBar()
self.scrollbar.markerPos = 1 # Set scrollbar to start position
self.border = self._setBorder()
self.aperture = self._setAperture()
# then layout the Y positions
self._layoutY()
if self.autoLog:
logging.info("Layout set for Form: {}.".format(self.name))
def _layoutY(self):
"""This needs to be done when editable textboxes change their size
because everything below them needs to move too"""
self.topEdge = self.pos[1] + self.size[1] / 2.0
self._currentVirtualY = self.topEdge - self.itemPadding
# For each question, create textstim and rating scale
for item in self.items:
question = item['itemCtrl']
response = item['responseCtrl']
# update item baseY
question._baseY = self._currentVirtualY
# and get height to update current Y
questionHeight = self._getItemHeight(item=item, ctrl=question)
            # the response goes on the line below the question if together
            # they are too wide for one line (or if there is no response)
            stacked = (item['itemWidth'] + item['responseWidth'] > 1
                       or not response)
            if stacked:
                # move down past the question so the response starts on the next line
                self._currentVirtualY -= questionHeight + self.itemPadding
# update response baseY
if not response:
continue
# get height to update current Y
respHeight = self._getItemHeight(item=item, ctrl=response)
# update item baseY
# slider needs to align by middle
if type(response) == psychopy.visual.Slider:
response._baseY = self._currentVirtualY - respHeight/2
else: # hopefully we have an object that can anchor at top?
response._baseY = self._currentVirtualY
            # advance the virtual Y past this row
            if stacked:
                # the response was placed on its own line below the question
                self._currentVirtualY -= respHeight + self.itemPadding
            else:
                # the response shares the line with the question - advance by
                # whichever of the two is taller
                self._currentVirtualY -= (
                    max(questionHeight, respHeight) + self.itemPadding
                )
        self._setDecorations()  # choose whether to show/hide the scrollbar
def _setDecorations(self):
"""Sets Form decorations i.e., Border and scrollbar"""
# add scrollbar if it's needed
self._decorations = [self.border]
fractionVisible = self.size[1] / (-self._currentVirtualY)
if fractionVisible < 1.0:
self._decorations.append(self.scrollbar)
def _inRange(self, item):
"""Check whether item position falls within border area
Parameters
----------
item : TextStim, Slider object
TextStim or Slider item from survey
Returns
-------
bool
Returns True if item position falls within border area
"""
upperRange = self.size[1]
lowerRange = -self.size[1]
return (item.pos[1] < upperRange and item.pos[1] > lowerRange)
def _drawDecorations(self):
"""Draw decorations on form."""
[decoration.draw() for decoration in self._decorations]
def _drawExternalDecorations(self):
"""Draw decorations outside the aperture"""
[decoration.draw() for decoration in self._externalDecorations]
def _drawCtrls(self):
"""Draw elements on form within border range.
Parameters
----------
items : List
List of TextStim or Slider item from survey
"""
for idx, item in enumerate(self.items):
for element in [item['itemCtrl'], item['responseCtrl']]:
if element is None: # e.g. because this has no resp obj
continue
element.pos = (element.pos[0],
element._baseY - self._getScrollOffset())
if self._inRange(element):
element.draw()
def setAutoDraw(self, value, log=None):
"""Sets autoDraw for Form and any responseCtrl contained within
"""
for i in self.items:
if i['responseCtrl']:
i['responseCtrl'].__dict__['autoDraw'] = value
self.win.addEditable(i['responseCtrl'])
BaseVisualStim.setAutoDraw(self, value, log)
def draw(self):
"""Draw all form elements"""
# Check mouse wheel
        self.scrollbar.markerPos += (
            self.scrollbar.mouse.getWheelRel()[1] / self.scrollSpeed)
        # draw any decorations that sit outside the aperture
        self._drawExternalDecorations()
# enable aperture
self.aperture.enable()
# draw the box and scrollbar
self._drawDecorations()
# Draw question and response objects
self._drawCtrls()
# disable aperture
self.aperture.disable()
def getData(self):
"""Extracts form questions, response ratings and response times from
Form items
Returns
-------
list
A copy of the data as a list of dicts
"""
nIncomplete = 0
nIncompleteRequired = 0
for thisItem in self.items:
if 'responseCtrl' not in thisItem or not thisItem['responseCtrl']:
continue # maybe a heading or similar
responseCtrl = thisItem['responseCtrl']
# get response if available
if hasattr(responseCtrl, 'getRating'):
thisItem['response'] = responseCtrl.getRating()
else:
thisItem['response'] = responseCtrl.text
if thisItem['response'] in [None, '']:
# todo : handle required items here (e.g. ending with * ?)
nIncomplete += 1
# get RT if available
if hasattr(responseCtrl, 'getRT'):
thisItem['rt'] = responseCtrl.getRT()
else:
thisItem['rt'] = None
self._complete = (nIncomplete == 0)
return copy.copy(self.items) # don't want users changing orig
def addDataToExp(self, exp, itemsAs='rows'):
"""Gets the current Form data and inserts into an
:class:`~psychopy.experiment.ExperimentHandler` object either as rows
or as columns
Parameters
----------
exp : :class:`~psychopy.experiment.ExperimentHandler`
itemsAs: 'rows','cols' (or 'columns')
Returns
-------
"""
data = self.getData() # will be a copy of data (we can trash it)
asCols = itemsAs.lower() in ['cols', 'columns']
# iterate over items and fields within each item
# iterate all items and all fields before calling nextEntry
for ii, thisItem in enumerate(data): # data is a list of dicts
for fieldName in thisItem:
if fieldName in _doNotSave:
continue
if asCols: # for columns format, we need index for item
columnName = "{}[{}].{}".format(self.name, ii, fieldName)
else:
columnName = "{}.{}".format(self.name, fieldName)
exp.addData(columnName, thisItem[fieldName])
# finished field
if not asCols: # for rows format we add a newline each item
exp.nextEntry()
# finished item
# finished form
        if asCols:  # for cols format we add a single newline after the whole form
exp.nextEntry()
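        # Resulting column names (illustrative), for a form named "survey":
        #   itemsAs='rows': survey.itemText, survey.response, ... with one
        #                   ExperimentHandler entry per item
        #   itemsAs='cols': survey[0].itemText, survey[0].response,
        #                   survey[1].itemText, ... all in a single entry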
def formComplete(self):
"""Deprecated in version 2020.2. Please use the Form.complete property
"""
return self.complete
@property
def pos(self):
if hasattr(self, '_pos'):
return self._pos
@pos.setter
def pos(self, value):
self._pos = value
if hasattr(self, 'aperture'):
self.aperture.pos = value
if hasattr(self, 'border'):
self.border.pos = value
self.leftEdge = self.pos[0] - self.size[0] / 2.0
self.rightEdge = self.pos[0] + self.size[0] / 2.0
# Set horizontal position of elements
for item in self.items:
for element in [item['itemCtrl'], item['responseCtrl']]:
if element is None: # e.g. because this has no resp obj
continue
element.pos = [value[0], element.pos[1]]
element._baseY = value[1]
if hasattr(element, 'anchor'):
element.anchor = 'top-center'
# Calculate new position for everything on the y axis
self.scrollbar.pos = (self.rightEdge - .008, self.pos[1])
self._layoutY()
@property
def complete(self):
"""A read-only property to determine if the current form is complete"""
self.getData()
return self._complete
@property
def foreColor(self):
"""
Sets both `itemColor` and `responseColor` to the same value
"""
return ColorMixin.foreColor.fget(self)
@foreColor.setter
def foreColor(self, value):
ColorMixin.foreColor.fset(self, value)
self.itemColor = value
self.responseColor = value
@property
def fillColor(self):
"""
Color of the form's background
"""
return ColorMixin.fillColor.fget(self)
@fillColor.setter
def fillColor(self, value):
ColorMixin.fillColor.fset(self, value)
if hasattr(self, "border"):
self.border.fillColor = value
@property
def borderColor(self):
"""
Color of the line around the form
"""
return ColorMixin.borderColor.fget(self)
@borderColor.setter
def borderColor(self, value):
ColorMixin.borderColor.fset(self, value)
if hasattr(self, "border"):
self.border.borderColor = value
@property
def itemColor(self):
"""
Color of the text on form items
"""
return self._itemColor
@itemColor.setter
def itemColor(self, value):
self._itemColor = value
# Set text color on each item
for item in self.items:
if 'itemCtrl' in item:
if isinstance(item['itemCtrl'], psychopy.visual.TextBox2):
item['itemCtrl'].foreColor = value
@property
def responseColor(self):
"""
Color of the lines and text on form responses
"""
if hasattr(self, "_responseColor"):
return self._responseColor
@responseColor.setter
def responseColor(self, value):
self._responseColor = value
# Set line color on scrollbar
if hasattr(self, "scrollbar"):
self.scrollbar.borderColor = value
# Set line and label color on each item
for item in self.items:
if 'responseCtrl' in item:
if isinstance(item['responseCtrl'], psychopy.visual.Slider):
item['responseCtrl'].borderColor = value
item['responseCtrl'].foreColor = value
@property
def markerColor(self):
"""
Color of the marker on any sliders in this form
"""
if hasattr(self, "_markerColor"):
return self._markerColor
@markerColor.setter
def markerColor(self, value):
self._markerColor = value
# Set marker color on scrollbar
if hasattr(self, "scrollbar"):
self.scrollbar.fillColor = value
# Set marker color on each item
for item in self.items:
if 'responseCtrl' in item:
if isinstance(item['responseCtrl'], psychopy.visual.Slider):
item['responseCtrl'].fillColor = value
@property
def style(self):
if hasattr(self, "_style"):
return self._style
@style.setter
def style(self, style):
"""Sets some predefined styles or use these to create your own.
If you fancy creating and including your own styles that would be great!
Parameters
----------
style: string
Known styles currently include:
'light': black text on a light background
'dark': white text on a dark background
"""
self._style = style
# If style is custom, skip the rest
if style in ['custom...', 'None', None]:
return
# If style is a string of a known style, use that
if style in self.knownStyles:
style = self.knownStyles[style]
# By here, style should be a dict
if not isinstance(style, dict):
return
# Apply each key in the style dict as an attr
for key, val in style.items():
if hasattr(self, key):
setattr(self, key, val)
@property
def values(self):
# Iterate through each control and append its value to a dict
out = {}
for item in self.getData():
out.update(
{item['index']: item['response']}
)
return out
@values.setter
def values(self, values):
for item in self.items:
if item['index'] in values:
ctrl = item['responseCtrl']
# set response if available
if hasattr(ctrl, "rating"):
ctrl.rating = values[item['index']]
elif hasattr(ctrl, "value"):
ctrl.value = values[item['index']]
else:
ctrl.text = values[item['index']]
|
try:
import psyco
psyco.full()
except ImportError:
pass
MAX_N = 300
BRANCH = 4
ra = [0] * MAX_N
unrooted = [0] * MAX_N
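# (Interpretation, not from the original source) ra[n] appears to accumulate
# the number of rooted trees with n nodes whose branching factor is at most
# BRANCH, and unrooted[n] the corresponding count of free (unrooted) trees;
# main() prints one count per size up to MAX_N.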
def tree(br, n, l, sum = 1, cnt = 1):
global ra, unrooted, MAX_N, BRANCH
for b in xrange(br + 1, BRANCH + 1):
sum += n
if sum >= MAX_N:
return
# prevent unneeded long math
if l * 2 >= sum and b >= BRANCH:
return
if b == br + 1:
c = ra[n] * cnt
else:
c = c * (ra[n] + (b - br - 1)) / (b - br)
if l * 2 < sum:
unrooted[sum] += c
if b < BRANCH:
ra[sum] += c;
for m in range(1, n):
tree(b, m, l, sum, c)
def bicenter(s):
global ra, unrooted
if not (s & 1):
aux = ra[s / 2]
unrooted[s] += aux * (aux + 1) / 2
def main():
global ra, unrooted, MAX_N
ra[0] = ra[1] = unrooted[0] = unrooted[1] = 1
for n in xrange(1, MAX_N):
tree(0, n, n)
bicenter(n)
print "%d: %d" % (n, unrooted[n])
main()
|
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file that contains functions to embed the charts.
from ..fusioncharts import FusionCharts
from ..models import *
# The `chart` function is defined to load data from a `Country` Model.
# This data will be converted to JSON and the chart will be rendered.
def chart(request):
# Chart data is passed to the `dataSource` parameter, as dict, in the form of key-value pairs.
dataSource = {}
dataSource['chart'] = {
"caption": "Top 10 Most Populous Countries",
"paletteColors": "#0075c2",
"bgColor": "#ffffff",
"borderAlpha": "20",
"canvasBorderAlpha": "0",
"usePlotGradientColor": "0",
"plotBorderAlpha": "10",
"showXAxisLine": "1",
"xAxisLineColor": "#999999",
"showValues": "0",
"divlineColor": "#999999",
"divLineIsDashed": "1",
"showAlternateHGridColor": "0"
}
    # Convert the data in the `Country` model into a format that can be consumed by FusionCharts.
    # The data for the chart should be an array in which each element is a JSON object
    # with `label` and `value` keys.
dataSource['data'] = []
dataSource['linkeddata'] = []
# Iterate through the data in `Country` model and insert in to the `dataSource['data']` list.
for key in Country.objects.all():
data = {}
data['label'] = key.Name
data['value'] = key.Population
# Create link for each country when a data plot is clicked.
data['link'] = 'newchart-json-' + key.Code
dataSource['data'].append(data)
        # Create and initiate the linkData for the cities drilldown, keyed by country code
        linkData = {}
        linkData['id'] = key.Code
linkedchart = {}
linkedchart['chart'] = {
"caption": "Top 10 Most Populous Cities - " + key.Name,
"paletteColors": "#0075c2",
"bgColor": "#ffffff",
"borderAlpha": "20",
"canvasBorderAlpha": "0",
"usePlotGradientColor": "0",
"plotBorderAlpha": "10",
"showXAxisLine": "1",
"xAxisLineColor": "#999999",
"showValues": "0",
"divlineColor": "#999999",
"divLineIsDashed": "1",
"showAlternateHGridColor": "0"
}
# Convert the data in the `City` model into a format that can be consumed by FusionCharts.
linkedchart['data'] = []
        # Filter the cities on the country code
        for city in City.objects.all().filter(CountryCode=key.Code):
            arrData = {}
            arrData['label'] = city.Name
            arrData['value'] = city.Population
            linkedchart['data'].append(arrData)
linkData['linkedchart'] = linkedchart
dataSource['linkeddata'].append(linkData)
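    # At this point dataSource has, schematically:
    #   {"chart": {...},
    #    "data": [{"label": ..., "value": ..., "link": "newchart-json-<code>"}, ...],
    #    "linkeddata": [{"id": "<code>", "linkedchart": {"chart": {...}, "data": [...]}}, ...]}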
# Create an object for the Column 2D chart using the FusionCharts class constructor
column2D = FusionCharts("column2D", "ex1", "600", "400", "chart-1", "json", dataSource)
return render(request, 'index.html', {'output': column2D.render()})
|
"""
Attacks for TensorFlow Eager
"""
from distutils.version import LooseVersion
import numpy as np
from six.moves import xrange
import tensorflow as tf
from cleverhans import attacks
from cleverhans import utils
from cleverhans.compat import reduce_sum
from cleverhans.model import CallableModelWrapper
from cleverhans.model import Model
from cleverhans.loss import LossCrossEntropy
_logger = utils.create_logger("cleverhans.attacks_tfe")
if LooseVersion(tf.__version__) < LooseVersion('1.8.0'):
  error_msg = ('For eager execution, use TensorFlow version 1.8.0 or greater.')
raise ValueError(error_msg)
class Attack(attacks.Attack):
"""
Abstract base class for all eager attack classes.
:param model: An instance of the cleverhans.model.Model class.
:param back: The backend to use. Inherited from AttackBase class.
:param dtypestr: datatype of the input data samples and crafted
adversarial attacks.
"""
def __init__(self, model, dtypestr='float32'):
super(Attack, self).__init__(model, dtypestr=dtypestr)
# Validate the input arguments.
if dtypestr != 'float32' and dtypestr != 'float64':
raise ValueError("Unexpected input for argument dtypestr.")
self.tf_dtype = tf.as_dtype(dtypestr)
self.np_dtype = np.dtype(dtypestr)
if not isinstance(model, Model):
raise ValueError("The model argument should be an instance of"
" the cleverhans.model.Model class.")
# Prepare attributes
self.model = model
self.dtypestr = dtypestr
def construct_graph(self, **kwargs):
"""
Constructs the graph required to run the attacks.
Is inherited from the attack class, is overloaded
to raise an error.
"""
error = "This method is not required for eager execution."
raise AttributeError(error)
def generate_np(self, x_val, **kwargs):
"""
Generate adversarial examples and return them as a NumPy array.
:param x_val: A NumPy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples.
"""
tfe = tf.contrib.eager
x = tfe.Variable(x_val)
adv_x = self.generate(x, **kwargs)
return adv_x.numpy()
def construct_variables(self, kwargs):
"""
Construct the inputs to the attack graph.
Is inherited from the attack class, is overloaded
to raise an error.
"""
error = "This method is not required for eager execution."
raise AttributeError(error)
class FastGradientMethod(Attack, attacks.FastGradientMethod):
"""
Inherited class from Attack and cleverhans.attacks.FastGradientMethod.
This attack was originally implemented by Goodfellow et al. (2015) with the
infinity norm (and is known as the "Fast Gradient Sign Method"). This
implementation extends the attack to other norms, and is therefore called
the Fast Gradient Method.
Paper link: https://arxiv.org/abs/1412.6572
"""
def __init__(self, model, dtypestr='float32', **kwargs):
"""
Creates a FastGradientMethod instance in eager execution.
    :model: CNN network; should be an instance of cleverhans.model.Model. If it
            is not, the output is wrapped to probs via CallableModelWrapper.
    :dtypestr: datatype in the string format.
"""
del kwargs
if not isinstance(model, Model):
model = CallableModelWrapper(model, 'probs')
super(FastGradientMethod, self).__init__(model, dtypestr)
def generate(self, x, **kwargs):
"""
Generates the adversarial sample for the given input.
:param x: The model's inputs.
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
    :param y: (optional) A tf Variable with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
    :param y_target: (optional) A tf Variable with the labels to target.
Leave y_target=None if y is also set.
Labels should be one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
return self.fgm(x, labels=labels, targeted=(self.y_target is not None))
def fgm(self, x, labels, targeted=False):
"""
TensorFlow Eager implementation of the Fast Gradient Method.
    :param x: the input variable
    :param labels: the (one-hot) labels used to compute the loss
:param targeted: Is the attack targeted or untargeted? Untargeted, the
default, will try to make the label incorrect.
Targeted will instead try to move in the direction
of being more like y.
:return: a tensor for the adversarial example
"""
# Compute loss
with tf.GradientTape() as tape:
# input should be watched because it may be
# combination of trainable and non-trainable variables
tape.watch(x)
loss_obj = LossCrossEntropy(self.model, smoothing=0.)
loss = loss_obj.fprop(x=x, y=labels)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad = tape.gradient(loss, x)
if self.ord == np.inf:
# Take sign of gradient
normalized_grad = tf.sign(grad)
# The following line should not change the numerical results.
# It applies only because `normalized_grad` is the output of
# a `sign` op, which has zero derivative anyway.
# It should not be applied for the other norms, where the
# perturbation has a non-zero derivative.
normalized_grad = tf.stop_gradient(normalized_grad)
elif self.ord == 1:
red_ind = list(xrange(1, len(x.get_shape())))
avoid_zero_div = 1e-12
avoid_nan_norm = tf.maximum(avoid_zero_div,
reduce_sum(tf.abs(grad),
reduction_indices=red_ind,
keepdims=True))
normalized_grad = grad / avoid_nan_norm
elif self.ord == 2:
red_ind = list(xrange(1, len(x.get_shape())))
avoid_zero_div = 1e-12
square = tf.maximum(avoid_zero_div,
reduce_sum(tf.square(grad),
reduction_indices=red_ind,
keepdims=True))
normalized_grad = grad / tf.sqrt(square)
else:
raise NotImplementedError("Only L-inf, L1 and L2 norms are "
"currently implemented.")
# Multiply by constant epsilon
scaled_grad = self.eps * normalized_grad
# Add perturbation to original example to obtain adversarial example
adv_x = x + scaled_grad
# If clipping is needed
# reset all values outside of [clip_min, clip_max]
if (self.clip_min is not None) and (self.clip_max is not None):
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
class BasicIterativeMethod(Attack, attacks.BasicIterativeMethod):
"""
Inherited class from Attack and cleverhans.attacks.BasicIterativeMethod.
The Basic Iterative Method (Kurakin et al. 2016). The original paper used
hard labels for this attack; no label smoothing.
Paper link: https://arxiv.org/pdf/1607.02533.pdf
"""
FGM_CLASS = FastGradientMethod
def __init__(self, model, dtypestr='float32'):
"""
Creates a BasicIterativeMethod instance in eager execution.
    :model: CNN network; should be an instance of cleverhans.model.Model. If it
            is not, the output is wrapped to probs via CallableModelWrapper.
:dtypestr: datatype in the string format.
"""
if not isinstance(model, Model):
model = CallableModelWrapper(model, 'probs')
super(BasicIterativeMethod, self).__init__(model, dtypestr)
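# Usage sketch (illustrative, not part of the original module): `my_model` must
# be a cleverhans.model.Model or a callable returning probabilities, and `x_np`
# a NumPy batch of inputs.
#
#   tf.enable_eager_execution()
#   fgsm = FastGradientMethod(my_model)
#   adv_np = fgsm.generate_np(x_np, eps=0.3, clip_min=0., clip_max=1.)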
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# Content:
# URL: http://www.runoob.com/python/python-exercise-example.html
# Knowledge points:
# URL:
# Notes:
from bs4 import BeautifulSoup
import requests, sys, urllib2
reload(sys)
sys.setdefaultencoding("utf8")
wttr = "http://wttr.in/?format=1"
wttrre = urllib2.Request(wttr)
wttrpon = urllib2.urlopen(wttrre, timeout=60)
wttrapi = wttrpon.read().replace("\n", "")
print(wttrapi)
print("---")
fwttr = "http://wttr.in?0"
fwttrre = urllib2.Request(fwttr)
fwttrpon = urllib2.urlopen(fwttrre, timeout=60)
fwttrapi = fwttrpon.read()
fbs = BeautifulSoup(fwttrapi, "html.parser")
a = (
str(fbs.body.get_text())
.replace("\n\n", "\n")
.replace("\n", "| trim=false size=16 color=#FFFFFF font=Courier New \n")
)
print(a)
# TODO: port this to Python 3
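# (Interpretation, not from the original source) This script looks like a
# BitBar/xbar menu-bar plugin: the one-line wttr.in forecast is the menu title,
# "---" separates it from the dropdown, and the "| trim=false size=16 ..."
# suffixes are BitBar line parameters.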
|
import datetime
import warnings
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from regressiontests.views.models import Article, UrlArticle
class CreateObjectTest(TestCase):
fixtures = ['testdata.json']
urls = 'regressiontests.views.generic_urls'
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.views.generic.create_update')
def tearDown(self):
self.restore_warnings_state()
def test_login_required_view(self):
"""
Verifies that an unauthenticated user attempting to access a
login_required view gets redirected to the login page and that
an authenticated user is let through.
"""
view_url = '/create_update/member/create/article/'
response = self.client.get(view_url)
self.assertRedirects(response, '/accounts/login/?next=%s' % view_url)
# Now login and try again.
login = self.client.login(username='testclient', password='password')
self.assertTrue(login, 'Could not log in')
response = self.client.get(view_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'views/article_form.html')
def test_create_article_display_page(self):
"""
Ensures the generic view returned the page and contains a form.
"""
view_url = '/create_update/create/article/'
response = self.client.get(view_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'views/article_form.html')
if not response.context.get('form'):
self.fail('No form found in the response.')
def test_create_article_with_errors(self):
"""
POSTs a form that contains validation errors.
"""
view_url = '/create_update/create/article/'
num_articles = Article.objects.count()
response = self.client.post(view_url, {
'title': 'My First Article',
})
self.assertFormError(response, 'form', 'slug', [u'This field is required.'])
self.assertTemplateUsed(response, 'views/article_form.html')
self.assertEqual(num_articles, Article.objects.count(),
"Number of Articles should not have changed.")
def test_create_custom_save_article(self):
"""
Creates a new article using a custom form class with a save method
that alters the slug entered.
"""
view_url = '/create_update/create_custom/article/'
response = self.client.post(view_url, {
'title': 'Test Article',
'slug': 'this-should-get-replaced',
'author': 1,
'date_created': datetime.datetime(2007, 6, 25),
})
self.assertRedirects(response,
'/create_update/view/article/some-other-slug/',
target_status_code=404)
class UpdateDeleteObjectTest(TestCase):
fixtures = ['testdata.json']
urls = 'regressiontests.views.generic_urls'
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.views.generic.create_update')
def tearDown(self):
self.restore_warnings_state()
def test_update_object_form_display(self):
"""
Verifies that the form was created properly and with initial values.
"""
response = self.client.get('/create_update/update/article/old_article/')
self.assertTemplateUsed(response, 'views/article_form.html')
self.assertHTMLEqual(unicode(response.context['form']['title']),
u'<input id="id_title" type="text" name="title" value="Old Article" maxlength="100" />')
def test_update_object(self):
"""
Verifies the updating of an Article.
"""
response = self.client.post('/create_update/update/article/old_article/', {
'title': 'Another Article',
'slug': 'another-article-slug',
'author': 1,
'date_created': datetime.datetime(2007, 6, 25),
})
article = Article.objects.get(pk=1)
self.assertEqual(article.title, "Another Article")
def test_delete_object_confirm(self):
"""
Verifies the confirm deletion page is displayed using a GET.
"""
response = self.client.get('/create_update/delete/article/old_article/')
self.assertTemplateUsed(response, 'views/article_confirm_delete.html')
def test_delete_object(self):
"""
Verifies the object actually gets deleted on a POST.
"""
view_url = '/create_update/delete/article/old_article/'
response = self.client.post(view_url)
try:
Article.objects.get(slug='old_article')
except Article.DoesNotExist:
pass
else:
self.fail('Object was not deleted.')
class PostSaveRedirectTests(TestCase):
"""
Verifies that the views redirect to the correct locations depending on
if a post_save_redirect was passed and a get_absolute_url method exists
on the Model.
"""
fixtures = ['testdata.json']
article_model = Article
urls = 'regressiontests.views.generic_urls'
create_url = '/create_update/create/article/'
update_url = '/create_update/update/article/old_article/'
delete_url = '/create_update/delete/article/old_article/'
create_redirect = '/create_update/view/article/my-first-article/'
update_redirect = '/create_update/view/article/another-article-slug/'
delete_redirect = '/create_update/'
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.views.generic.create_update')
def tearDown(self):
self.restore_warnings_state()
def test_create_article(self):
num_articles = self.article_model.objects.count()
response = self.client.post(self.create_url, {
'title': 'My First Article',
'slug': 'my-first-article',
'author': '1',
'date_created': datetime.datetime(2007, 6, 25),
})
self.assertRedirects(response, self.create_redirect,
target_status_code=404)
self.assertEqual(num_articles + 1, self.article_model.objects.count(),
"A new Article should have been created.")
def test_update_article(self):
num_articles = self.article_model.objects.count()
response = self.client.post(self.update_url, {
'title': 'Another Article',
'slug': 'another-article-slug',
'author': 1,
'date_created': datetime.datetime(2007, 6, 25),
})
self.assertRedirects(response, self.update_redirect,
target_status_code=404)
self.assertEqual(num_articles, self.article_model.objects.count(),
"A new Article should not have been created.")
def test_delete_article(self):
num_articles = self.article_model.objects.count()
response = self.client.post(self.delete_url)
self.assertRedirects(response, self.delete_redirect,
target_status_code=404)
self.assertEqual(num_articles - 1, self.article_model.objects.count(),
"An Article should have been deleted.")
class NoPostSaveNoAbsoluteUrl(PostSaveRedirectTests):
"""
Tests that when no post_save_redirect is passed and no get_absolute_url
method exists on the Model that the view raises an ImproperlyConfigured
error.
"""
urls = 'regressiontests.views.generic_urls'
create_url = '/create_update/no_redirect/create/article/'
update_url = '/create_update/no_redirect/update/article/old_article/'
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.views.generic.create_update')
def tearDown(self):
self.restore_warnings_state()
def test_create_article(self):
self.assertRaises(ImproperlyConfigured,
super(NoPostSaveNoAbsoluteUrl, self).test_create_article)
def test_update_article(self):
self.assertRaises(ImproperlyConfigured,
super(NoPostSaveNoAbsoluteUrl, self).test_update_article)
def test_delete_article(self):
"""
The delete_object view requires a post_delete_redirect, so skip testing
here.
"""
pass
class AbsoluteUrlNoPostSave(PostSaveRedirectTests):
"""
Tests that the views redirect to the Model's get_absolute_url when no
post_save_redirect is passed.
"""
urls = 'regressiontests.views.generic_urls'
# Article model with get_absolute_url method.
article_model = UrlArticle
create_url = '/create_update/no_url/create/article/'
update_url = '/create_update/no_url/update/article/old_article/'
create_redirect = '/urlarticles/my-first-article/'
update_redirect = '/urlarticles/another-article-slug/'
def setUp(self):
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.views.generic.create_update')
def tearDown(self):
self.restore_warnings_state()
def test_delete_article(self):
"""
The delete_object view requires a post_delete_redirect, so skip testing
here.
"""
pass
|
# Copyright (c) 2021 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
import numpy as np
from geoh5py.workspace import Workspace
from SimPEG import utils
from geoapps.utils import get_inversion_output
from geoapps.utils.testing import setup_inversion_workspace
# import pytest
# pytest.skip("eliminating conflicting test.", allow_module_level=True)
# To test the full run and validate the inversion.
# Move this file out of the test directory and run.
target_ip_run = {
"data_norm": 0.00796,
"phi_d": 8.086,
"phi_m": 0.1146,
}
def test_ip_run(
tmp_path,
n_electrodes=4,
n_lines=3,
max_iterations=1,
pytest=True,
refinement=(4, 6),
):
from geoapps.drivers.induced_polarization_inversion import InducedPolarizationDriver
from geoapps.io.InducedPolarization.params import InducedPolarizationParams
np.random.seed(0)
# Run the forward
geoh5 = setup_inversion_workspace(
tmp_path,
background=1e-6,
anomaly=1e-1,
n_electrodes=n_electrodes,
n_lines=n_lines,
refinement=refinement,
dcip=True,
flatten=False,
)
tx_obj = geoh5.get_entity("survey (currents)")[0]
tx_obj.cells = tx_obj.cells.astype("uint32")
model = geoh5.get_entity("model")[0]
params = InducedPolarizationParams(
forward_only=True,
geoh5=geoh5,
mesh=model.parent.uid,
topography_object=geoh5.get_entity("topography")[0].uid,
resolution=0.0,
z_from_topo=True,
data_object=geoh5.get_entity("survey")[0].uid,
starting_model_object=model.parent.uid,
starting_model=model.uid,
conductivity_model=1e-2,
)
params.workpath = tmp_path
fwr_driver = InducedPolarizationDriver(params)
fwr_driver.run()
geoh5 = Workspace(geoh5.h5file)
potential = geoh5.get_entity("Predicted_chargeability")[0]
# Run the inverse
np.random.seed(0)
params = InducedPolarizationParams(
geoh5=geoh5,
mesh=geoh5.get_entity("mesh")[0].uid,
topography_object=geoh5.get_entity("topography")[0].uid,
resolution=0.0,
data_object=potential.parent.uid,
conductivity_model=1e-2,
starting_model=1e-6,
s_norm=0.0,
x_norm=0.0,
y_norm=0.0,
z_norm=0.0,
gradient_type="components",
chargeability_channel_bool=True,
z_from_topo=True,
chargeability_channel=potential.uid,
chargeability_uncertainty=2e-4,
max_iterations=max_iterations,
initial_beta=None,
initial_beta_ratio=1e0,
prctile=100,
upper_bound=0.1,
tile_spatial=n_lines,
)
params.workpath = tmp_path
driver = InducedPolarizationDriver(params)
driver.run()
output = get_inversion_output(
driver.params.geoh5.h5file, driver.params.ga_group.uid
)
if pytest:
np.testing.assert_almost_equal(
np.linalg.norm(potential.values),
target_ip_run["data_norm"],
decimal=3,
)
np.testing.assert_almost_equal(output["phi_m"][1], target_ip_run["phi_m"])
np.testing.assert_almost_equal(output["phi_d"][1], target_ip_run["phi_d"])
else:
return fwr_driver.starting_model, driver.inverse_problem.model
if __name__ == "__main__":
# Full run
m_start, m_rec = test_ip_run(
"./",
n_electrodes=20,
n_lines=5,
max_iterations=20,
pytest=False,
refinement=(4, 8),
)
residual = np.linalg.norm(m_rec - m_start) / np.linalg.norm(m_start) * 100.0
assert (
residual < 80.0
), f"Deviation from the true solution is {residual:.2f}%. Validate the solution!"
print("Conductivity model is within 15% of the answer. You are so special!")
|
import carto2gpd
import pytest
import pandas as pd
def test_limit():
url = "https://phl.carto.com/api/v2/sql"
gdf = carto2gpd.get(url, "shootings", limit=5)
assert len(gdf) == 5
def test_fields():
url = "https://phl.carto.com/api/v2/sql"
fields = ["age", "fatal"]
gdf = carto2gpd.get(url, "shootings", fields=fields, limit=5)
assert all(col in gdf.columns for col in ["age", "fatal", "geometry"])
def test_where():
url = "https://phl.carto.com/api/v2/sql"
where = "fatal > 0"
gdf = carto2gpd.get(url, "shootings", where=where, limit=5)
assert (gdf.fatal > 0).all()
def test_bad_table():
url = "https://phl.carto.com/api/v2/sql"
with pytest.raises(ValueError):
gdf = carto2gpd.get(url, "nonexistent_table", limit=5)
def test_bad_where():
url = "https://phl.carto.com/api/v2/sql"
where = "bad_column > 0"
with pytest.raises(ValueError):
gdf = carto2gpd.get(url, "shootings", where=where, limit=5)
def test_no_geometry():
url = "https://phl.carto.com/api/v2/sql"
df = carto2gpd.get(url, "li_com_act_licenses", limit=5)
assert isinstance(df, pd.DataFrame)
|
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
__metaclass__ = type
from . import values as valuesModule
from . import base as baseModule
from . import standards as standardsModule
symmetryNoneToken = 'none'
symmetryLowerToken = 'lower'
symmetryUpperToken = 'upper'
symmetryOptions = ( symmetryNoneToken, symmetryLowerToken, symmetryUpperToken )
permutationPlusToken = '+'
permutationMinusToken = '-'
permutationOptions = ( permutationPlusToken, permutationMinusToken )
storageRowToken = 'row-major'
storageColumnToken = 'column-major'
class arrayBase( baseModule.xDataCoreMembers ) :
moniker = 'array'
def __init__( self, shape = None, symmetry = None, storageOrder = storageRowToken,
offset = None, permutation = permutationPlusToken,
index = None, label = None ) :
if( not( isinstance( self.compression, str ) ) ) : raise TypeError( 'compression must be a string' )
baseModule.xDataCoreMembers.__init__( self, self.moniker, index = index, label = label )
shape = tuple( int( value ) for value in shape )
if( len( shape ) == 0 ) : raise ValueError( 'shape must contain at least one value' )
if( min( shape ) <= 0 ) : raise ValueError( 'illegal shape "%s": lengths must all be greater than 0' % str(shape) )
self.__shape = shape
if( self.dimension > 3 ) : raise Exception( 'Currently, dimension = %d > 3 not supported' % len( self ) )
if( not( isinstance( symmetry, str ) ) ) : raise TypeError( 'symmetry must be a string' )
if( symmetry not in symmetryOptions ) :
raise ValueError( 'invalid symmetry = "%s"' % symmetry )
self.__symmetry = symmetry
if( symmetry != symmetryNoneToken ) :
for length in shape :
if( length != shape[0] ) : raise ValueError( 'a symmetrical array must be "square": shape = %s' % shape )
if( permutation != permutationPlusToken ) : raise TypeError( 'currently, only "%s" permutation is supported' % permutationPlusToken )
self.__permutation = permutationPlusToken
if( not( isinstance( storageOrder, str ) ) ) : raise TypeError( 'storageOrder must be a string' )
if( storageOrder not in [ storageRowToken, storageColumnToken ] ) :
raise ValueError( 'invalid storageOrder = "%s"' % storageOrder )
self.__storageOrder = storageOrder
if( offset is not None ) :
offset = [ int( value ) for value in offset ]
if( len( offset ) != len( shape ) ) : raise ValueError( 'offset must contain one value for each dimension' )
if( min( offset ) < 0 ) : raise ValueError( 'offsets must be non-negative: %s' % offset )
self.__offset = offset
def __len__( self ) :
return( self.dimension )
@property
def dimension( self ) :
return( len( self.__shape ) )
@property
def shape( self ) :
return( self.__shape )
@property
def size( self ) :
size = 1
for length in self.__shape : size *= length
return( size )
@property
def symmetry( self ) :
return( self.__symmetry )
@property
def permutation( self ) :
return( self.__permutation )
    # Note: each concrete subclass (full, diagonal, flattened, embedded) defines
    # ``compression`` as a class attribute. A base-class property returning
    # self.compression would recurse infinitely, so none is defined here.
@property
def storageOrder( self ) :
return( self.__storageOrder )
@property
def offset( self ) :
return( self.__offset )
def offsetScaleValues( self, offset, scale ):
"""Modify every element in the array: multiply by scale and add offset."""
self.values.offsetScaleValues( offset, scale )
def attributesToXMLAttributeStr( self ) :
attributeStr = ' shape="%s"' % ','.join( [ "%d" % length for length in self.shape ] )
if( self.compression != full.compression ) : attributeStr += ' compression="%s"' % self.compression
if( self.symmetry != symmetryNoneToken ) : attributeStr += ' symmetry="%s"' % self.symmetry
if( self.permutation != permutationPlusToken ) : attributeStr += ' permutation="%s"' % self.permutation
if( self.offset is not None ) : attributeStr += ' offset="%s"' % ','.join( [ "%d" % offset for offset in self.offset ] )
if( self.storageOrder != storageRowToken ) : attributeStr += ' storageOrder="%s"' % self.storageOrder
attributeStr += baseModule.xDataCoreMembers.attributesToXMLAttributeStr( self )
return( attributeStr )
def toXML( self, indent = '', **kwargs ) :
return( '\n'.join( self.toXMLList( indent = indent, **kwargs ) ) )
@classmethod
def parseXMLNode( cls, xDataElement, xPath, linkData, **kwargs ) :
xPath.append( xDataElement.tag )
attributes = arrayBase.parseXMLNodeAttributes( xDataElement )
compression = attributes.pop( 'compression' )
numberOfValues = { full.compression : [ 1 ], diagonal.compression : [ 1, 2 ], flattened.compression : [ 3 ],
embedded.compression : [ -1 ] }[compression]
if( ( numberOfValues[0] != -1 ) and ( len( xDataElement ) not in numberOfValues ) ) :
            raise Exception( '%s array expects %s sub-elements: got %d' % ( compression, numberOfValues, len( xDataElement ) ) )
shape = attributes.pop( 'shape' )
valuesDict = {}
if( compression != embedded.compression ) :
values = [ valuesModule.values.parseXMLNode( valuesElements, xPath, linkData ) for valuesElements in xDataElement ]
for value in values :
label = value.label
if( value.label is None ) : label = 'data'
valuesDict[label] = value
if( compression == full.compression ) :
array1 = full( shape, valuesDict['data'], **attributes )
elif( compression == diagonal.compression ) :
if( 'startingIndices' not in valuesDict ) : valuesDict['startingIndices'] = None
array1 = diagonal( shape, valuesDict['data'], valuesDict['startingIndices'], **attributes )
elif( compression == flattened.compression ) :
array1 = flattened( shape, valuesDict['data'], valuesDict['starts'], valuesDict['lengths'], **attributes )
elif( compression == embedded.compression ) :
array1 = embedded( shape, **attributes )
for subArrayElement in xDataElement :
array2 = arrayBase.parseXMLNode( subArrayElement, xPath, linkData )
array1.addArray( array2 )
else :
raise TypeError( 'Unsupported array type = "%s"' % compression )
xPath.pop( )
return( array1 )
@classmethod
def parseXMLString( cls, XMLString ) :
        from xml.etree import ElementTree
        return( cls.parseXMLNode( ElementTree.fromstring( XMLString ), xPath=[], linkData={} ) )
@staticmethod
def parseXMLNodeAttributes( xDataElement ) :
attributes = { 'shape' : ( None, str ),
'symmetry' : ( symmetryNoneToken, str ),
'permutation' : ( permutationPlusToken, str ),
'storageOrder' : ( storageRowToken, str ),
'offset' : ( None, str ),
'compression' : ( None, str ),
'index' : ( None, int ),
'label' : ( None, str ) }
attrs = {}
for key, item in list( attributes.items( ) ) : attrs[key] = item[0]
for key, item in list( xDataElement.items( ) ) :
if( key not in attributes ) : raise TypeError( 'Invalid attribute "%s"' % key )
attrs[key] = attributes[key][1]( item )
if( attrs['shape'] is None ) : raise ValueError( 'shape attribute is missing from array' )
attrs['shape'] = attrs['shape'].split( ',' )
if( attrs['offset'] is not None ) : attrs['offset'] = attrs['offset'].split( ',' )
if( attrs['compression'] is None ) : attrs['compression'] = full.compression
return( attrs )
@staticmethod
def indicesToFlatIndices( shape, indices ) :
flatIndices = []
for index in indices :
if( len( index ) != len( shape ) ) :
raise Exception( 'len( index ) = %d != len( shape ) = %d' % ( len( index ), len( shape ) ) )
length, flatIndex = 1, 0
for i1, i2 in enumerate( index ) :
flatIndex *= length
flatIndex += i2
length = shape[i1]
flatIndices.append( flatIndex )
return( flatIndices )
@staticmethod
def flatIndicesToIndices( shape, flatIndices ) :
indices = []
products = [ 1 ]
for s1 in shape : products.append( s1 * products[-1] )
del products[-1]
products.reverse( )
for i1 in flatIndices :
index = []
for product in products :
i2, i1 = divmod( i1, product )
index.append( i2 )
indices.append( index )
return( indices )
class full( arrayBase ) :
compression = 'full'
ancestryMembers = baseModule.xDataCoreMembers.ancestryMembers + ( 'values', )
def __init__( self, shape = None, data = None, symmetry = symmetryNoneToken, storageOrder = storageRowToken,
offset = None, permutation = permutationPlusToken,
index = None, label = None ) :
arrayBase.__init__( self, shape, symmetry = symmetry, storageOrder = storageOrder,
offset = offset, permutation = permutation,
index = index, label = label )
if( not( isinstance( data, valuesModule.values ) ) ) : data = valuesModule.values( data )
if( symmetry == symmetryNoneToken ) :
size = self.size
else : # Will be a 'square' array. Checked in arrayBase.
length, size = self.shape[0], 1
            for i1 in range( self.dimension ) : size = size * ( length + i1 ) // ( i1 + 1 )    # binomial coefficient C(length + dimension - 1, dimension)
if( size != len( data ) ) : raise ValueError( 'shape requires %d values while data has %d values' % ( size, len( data ) ) )
self.values = data
self.values.setAncestor( self )
def constructArray( self ) :
import numpy
if( self.symmetry == symmetryNoneToken ) :
array1 = numpy.array( [ value for value in self.values ] )
elif len(self.shape) == 2:
array1 = numpy.zeros( self.shape )
if( self.symmetry == symmetryLowerToken ) :
array1[numpy.tril_indices(self.shape[0])] = list(self.values)
il = numpy.tril_indices(self.shape[0], -1)
iu = (il[1],il[0])
array1[iu] = array1[il]
elif( self.symmetry == symmetryUpperToken ) :
array1[numpy.triu_indices(self.shape[0])] = list(self.values)
iu = numpy.triu_indices(self.shape[0], 1)
il = (iu[1],iu[0])
array1[il] = array1[iu]
else :
import itertools
array1 = numpy.zeros( self.size )
dimension = self.dimension
length = self.shape[0]
indexRange = range( len( self ) )
indices = dimension * [ 0 ]
indexChange = dimension - 1
mode = ( ( self.symmetry == symmetryUpperToken ) and ( self.storageOrder == storageRowToken ) ) or \
( self.symmetry == symmetryLowerToken ) and ( self.storageOrder == storageColumnToken )
for value in self.values :
permutations = itertools.permutations( indices )
for permutation in permutations :
index = permutation[0]
for p1 in permutation[1:] : index = length * index + p1
array1[index] = value
if( mode ) :
for i1 in indexRange :
indices[i1] += 1
if( indices[i1] < length ) : break
for i2 in indexRange :
if( i1 == i2 ) : break
indices[i2] = indices[i1]
else :
indexChange += 1
if( indexChange == dimension ) :
indexChange -= 1
value = indices[indexChange]
for i1 in indexRange :
if( i1 == ( dimension - 1 ) ) : break
if( indices[indexChange-1] > value ) : break
indices[indexChange] = 0
indexChange -= 1
indices[indexChange] += 1
order = { storageRowToken : 'C', storageColumnToken : 'F' }[self.storageOrder]
return( array1.reshape( self.shape, order = order ) )
def copy( self ) :
return( full( self.shape, self.values.copy( ),
symmetry = self.symmetry, storageOrder = self.storageOrder,
offset = self.offset, permutation = self.permutation,
index = self.index, label = self.label ) )
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
attributesStr = self.attributesToXMLAttributeStr( )
XMLList = [ '%s<%s%s>' % ( indent, self.moniker, attributesStr ) ]
XMLList += self.values.toXMLList( indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
class diagonal( arrayBase ) :
compression = 'diagonal'
ancestryMembers = baseModule.xDataCoreMembers.ancestryMembers + ( 'values', )
def __init__( self, shape = None, data = None, startingIndices = None, symmetry = symmetryNoneToken, storageOrder = storageRowToken,
offset = None, permutation = permutationPlusToken,
index = None, label = None ) :
arrayBase.__init__( self, shape, symmetry, storageOrder = storageOrder,
offset = offset, permutation = permutation,
index = index, label = label )
dimension = self.dimension
if( not( isinstance( data, valuesModule.values ) ) ) : data = valuesModule.values( data )
if( startingIndices is None ) :
self.startingIndicesOriginal = None
startingIndices = dimension * [ 0 ]
else :
if( not( isinstance( startingIndices, valuesModule.values ) ) ) :
startingIndices = valuesModule.values( startingIndices, valueType = standardsModule.types.integer32Token )
if( startingIndices.valueType not in [ standardsModule.types.integer32Token ] ) : raise TypeError( 'startingIndices must be a list of integers' )
self.startingIndicesOriginal = startingIndices
self.startingIndicesOriginal.label = 'startingIndices'
if( ( len( startingIndices ) == 0 ) or ( ( len( startingIndices ) % dimension ) != 0 ) ) :
                raise ValueError( 'length of startingIndices = %d must be a multiple of dimension = %d' %
( len( startingIndices ), dimension ) )
startingIndices = [ value for value in startingIndices ]
if( min( startingIndices ) < 0 ) : raise ValueError( 'negative starting index not allowed' )
self.startingIndices = []
size = 0
while( len( startingIndices ) > 0 ) :
self.startingIndices.append( startingIndices[:dimension] )
startingIndices = startingIndices[dimension:]
startingIndex = self.startingIndices[-1]
offset = self.shape[0]
for i1, length in enumerate( self.shape ) :
offset = min( offset, length - startingIndex[i1] )
if( offset < 0 ) : raise ValueError( 'starting index must be less than length: %s' % startingIndex )
size += offset
if( size != len( data ) ) : raise ValueError( 'shape requires %d values while data has %d values' % ( size, len( data ) ) )
self.values = data
self.values.setAncestor( self )
def constructArray( self ) :
import numpy
import itertools
valuesIndex = 0
array1 = numpy.zeros( self.size )
shape = self.shape
range1 = range( self.dimension )
for startIndex in self.startingIndices :
si = [ index for index in startIndex ]
moreToDo = True
while( moreToDo ) :
value = self.values[valuesIndex]
valuesIndex += 1
if( self.symmetry == symmetryNoneToken ) :
permutations = [ si ]
else :
permutations = itertools.permutations( si )
for permutation in permutations :
scale, flatIndex = 1, 0
for i1, index in enumerate( permutation ) :
flatIndex = flatIndex * scale + index
scale = shape[i1]
array1[flatIndex] = value
for i1 in range1 :
si[i1] += 1
if( si[i1] >= shape[i1] ) : moreToDo = False
order = { storageRowToken : 'C', storageColumnToken : 'F' }[self.storageOrder]
return( array1.reshape( self.shape, order = order ) )
def copy( self ) :
startingIndicesOriginal = self.startingIndicesOriginal
if( startingIndicesOriginal is not None ) : startingIndicesOriginal = self.startingIndicesOriginal.copy( )
return( diagonal( self.shape, self.values.copy( ), startingIndicesOriginal,
symmetry = self.symmetry, storageOrder = self.storageOrder,
offset = self.offset, permutation = self.permutation,
index = self.index, label = self.label ) )
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
attributesStr = self.attributesToXMLAttributeStr( )
XMLList = [ '%s<%s%s>' % ( indent, self.moniker, attributesStr ) ]
if( self.startingIndicesOriginal is not None ) :
XMLList += self.startingIndicesOriginal.toXMLList( indent2, **kwargs )
XMLList += self.values.toXMLList( indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
class flattened( arrayBase ) :
compression = 'flattened'
ancestryMembers = baseModule.xDataCoreMembers.ancestryMembers + ( 'values', 'lengths', 'starts' )
def __init__( self, shape = None, data = None, starts = None, lengths = None, symmetry = symmetryNoneToken,
storageOrder = storageRowToken,
offset = None, permutation = permutationPlusToken,
index = None, label = None,
dataToString = None ) :
arrayBase.__init__( self, shape, symmetry, storageOrder = storageOrder,
offset = offset, permutation = permutation,
index = index, label = label )
self.dataToString = dataToString
if( not( isinstance( data, valuesModule.values ) ) ) : data = valuesModule.values( data )
if( not( isinstance( starts, valuesModule.values ) ) ) :
starts = valuesModule.values( starts, valueType = standardsModule.types.integer32Token )
if( not( isinstance( lengths, valuesModule.values ) ) ) :
lengths = valuesModule.values( lengths, valueType = standardsModule.types.integer32Token )
if( len( starts ) != len( lengths ) ) : raise ValueError( 'length of starts = %d must equal length of lengths = %d' %
( len( starts ), len( lengths ) ) )
size = len( data )
length = 0
for i1 in lengths : length += i1
if( size != length ) : raise ValueError( 'number of data = %d and sum of length = %d differ' % ( size, length ) )
indexPriorEnd = -1
for i1, start in enumerate( starts ) :
if( start < 0 ) : raise ValueError( 'negative start (=%d) not allowed' % start )
if( start < indexPriorEnd ) : raise ValueError( 'data overlap: prior index end = %d current start = %d' % ( indexPriorEnd, start ) )
length = lengths[i1]
if( length < 0 ) : raise ValueError( 'negative length (=%d) not allowed' % length )
indexPriorEnd = start + length
if( indexPriorEnd > self.size ) :
                raise ValueError( 'data beyond array boundary: indexPriorEnd = %d, size = %d' % ( indexPriorEnd, self.size ) )
self.starts = starts
starts.label = 'starts'
self.starts.setAncestor( self )
self.lengths = lengths
lengths.label = 'lengths'
self.lengths.setAncestor( self )
self.values = data
self.values.setAncestor( self )
def constructArray( self ) :
import numpy
index = 0
array1 = numpy.zeros( self.size )
for i1, start in enumerate( self.starts ) :
length = self.lengths[i1]
for i2 in range( length ) :
array1[start+i2] = self.values[index]
index += 1
order = { storageRowToken : 'C', storageColumnToken : 'F' }[self.storageOrder]
array1 = array1.reshape( self.shape, order = order )
if self.symmetry == symmetryLowerToken:
array1 = numpy.tril(array1) + numpy.tril(array1, -1).T
elif self.symmetry == symmetryUpperToken:
array1 = numpy.triu(array1) + numpy.triu(array1, -1).T
return array1
def copy( self ) :
return( flattened( self.shape, self.values.copy( ), self.starts.copy( ), self.lengths.copy( ),
symmetry = self.symmetry, storageOrder = self.storageOrder,
offset = self.offset, permutation = self.permutation,
index = self.index, label = self.label ) )
@staticmethod
def fromNumpyArray( array, symmetry = symmetryNoneToken, nzeroes = 4 ):
"""
Generate a sparse flattened array that represents an arbitrary numpy array.
Only supports 'full' or 'lower-symmetric' matrices, with row-major data storage.
:param array: input numpy array
:param symmetry: allowed values are 'none' or 'lower'
:param nzeroes: how many zeroes to allow before adding a new 'start' and 'length'
        :return: a flattened array instance (see the commented usage sketch below this method)
"""
starts, lengths, sparseData = [], [], []
def helper( data, offset = 0 ):
idx = 0
end = len(data)
while idx < end:
if data[idx] != 0:
stop = idx+1
while stop < end:
if data[stop] != 0:
stop += 1
elif any(data[stop:stop + nzeroes]):
for i in range(nzeroes):
if stop + i < end and data[stop + i] != 0: stop += i
else:
break
starts.append( idx + offset )
lengths.append( stop - idx )
sparseData.extend( data[idx:stop] )
idx = stop
idx += 1
if symmetry == symmetryNoneToken:
helper( array.flatten() )
elif symmetry == symmetryLowerToken:
rows, cols = array.shape
for row in range(rows):
dat = array[row][:row+1]
helper( dat, offset = row * cols )
else:
raise NotImplementedError("Symmetry = '%s'" % symmetry)
return flattened(shape=array.shape, data=sparseData, starts=starts, lengths=lengths, symmetry=symmetry)
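    # Hedged usage sketch for fromNumpyArray (comments only; it assumes numpy is
    # importable and uses a symmetric 2x2 matrix so that the lower-triangle
    # round trip reproduces the input exactly):
    #
    #     >>> import numpy
    #     >>> a = numpy.array( [ [ 1., 2. ], [ 2., 3. ] ] )
    #     >>> sparse = flattened.fromNumpyArray( a, symmetry = symmetryLowerToken )
    #     >>> bool( ( sparse.constructArray( ) == a ).all( ) )
    #     True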
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
attributesStr = self.attributesToXMLAttributeStr( )
XMLList = [ '%s<%s%s>' % ( indent, self.moniker, attributesStr ) ]
XMLList += self.starts.toXMLList( indent2, **kwargs )
XMLList += self.lengths.toXMLList( indent2, **kwargs )
XMLList += self.values.toXMLList( indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
class embedded( arrayBase ) :
compression = 'embedded'
def __init__( self, shape = None, symmetry = symmetryNoneToken, storageOrder = storageRowToken,
offset = None, permutation = permutationPlusToken,
index = None, label = None ) :
arrayBase.__init__( self, shape, symmetry, storageOrder = storageOrder,
offset = offset, permutation = permutation,
index = index, label = label )
self.arrays = []
def addArray( self, array ) :
if( not( isinstance( array, arrayBase ) ) ) : raise TypeError( 'variable not an array instance' )
if( self.dimension < array.dimension ) : raise ValueError( 'cannot embedded array into a smaller dimensional array: %s %s' %
( self.dimension, array.dimension ) )
shape = list( reversed( array.shape ) )
if( array.offset is None ) : raise TypeError( 'embedded array must have offset defined' )
offsets = list( reversed( array.offset ) )
shapeOfParent = list( reversed( self.shape ) )
for i1, offset in enumerate( offsets ) :
if( ( offset + shape[i1] ) > shapeOfParent[i1] ) :
raise ValueError( 'child array outside of parent: %s %s %s' % ( self.shape, array.shape, array.offset ) )
self.arrays.append( array )
def constructArray( self ) :
import numpy
order = { storageRowToken : 'C', storageColumnToken : 'F' }[self.storageOrder]
array1 = numpy.zeros( self.shape, order = order )
for array in self.arrays :
array2 = array.constructArray( )
            slice1 = tuple( slice( offset, offset + array2.shape[i1] ) for i1, offset in enumerate( array.offset ) )
            array1[slice1] = array2
return( array1 )
def copy( self ) :
array1 = embedded( self.shape,
symmetry = self.symmetry, storageOrder = self.storageOrder,
offset = self.offset, permutation = self.permutation,
index = self.index, label = self.label )
for array in self.arrays : array1.addArray( array.copy( ) )
return array1
def offsetScaleValues( self, offset, scale ):
"""Modify every sub-array: multiply by scale and add offset."""
for subarray in self.arrays: subarray.offsetScaleValues( offset, scale )
def toXMLList( self, indent = '', **kwargs ) :
indent2 = indent + kwargs.get( 'incrementalIndent', ' ' )
attributesStr = self.attributesToXMLAttributeStr( )
XMLList = [ '%s<%s%s>' % ( indent, self.moniker, attributesStr ) ]
for array in self.arrays : XMLList += array.toXMLList( indent2, **kwargs )
XMLList[-1] += '</%s>' % self.moniker
return( XMLList )
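# ---------------------------------------------------------------------------
# Hedged illustration (not part of the public API): for a symmetric array of
# dimension d and edge length n, full.__init__ above stores only the
# independent values, whose count is the binomial coefficient C(n + d - 1, d).
# The helper below just restates that count with integer arithmetic; it is an
# illustrative assumption and nothing else in this module depends on it.
def _symmetricValueCount( edgeLength, dimension ) :
    count = 1
    for i1 in range( dimension ) : count = count * ( edgeLength + i1 ) // ( i1 + 1 )
    return( count )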
|
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
class PlugAlgoTest( GafferTest.TestCase ) :
def testPromote( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n1"] = GafferTest.AddNode()
s["b"]["n1"]["op1"].setValue( -10 )
s["n2"] = GafferTest.AddNode()
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n1"]["op1"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["n2"]["op1"], parent = s["b"]["user"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n1"]["op1"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n1"]["op2"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["n2"]["op1"] ) )
p = Gaffer.PlugAlgo.promote( s["b"]["n1"]["op1"] )
self.assertEqual( p.getName(), "op1" )
self.assertTrue( p.parent().isSame( s["b"] ) )
self.assertTrue( s["b"]["n1"]["op1"].getInput().isSame( p ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n1"]["op1"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n1"]["op1"] ) )
self.assertEqual( p.getValue(), -10 )
def testPromoteColor( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["c"] = Gaffer.Color3fPlug()
s["b"]["n"]["c"].setValue( imath.Color3f( 1, 0, 1 ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["c"] )
self.assertTrue( isinstance( p, Gaffer.Color3fPlug ) )
self.assertTrue( s["b"]["n"]["c"].getInput().isSame( p ) )
self.assertTrue( s["b"]["n"]["c"]["r"].getInput().isSame( p["r"] ) )
self.assertTrue( s["b"]["n"]["c"]["g"].getInput().isSame( p["g"] ) )
self.assertTrue( s["b"]["n"]["c"]["b"].getInput().isSame( p["b"] ) )
self.assertEqual( p.getValue(), imath.Color3f( 1, 0, 1 ) )
def testPromoteCompoundPlugAndSerialise( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.CompoundPlugNode()
s["b"]["n"]["p"]["s"].setValue( "hello" )
Gaffer.PlugAlgo.promote( s["b"]["n"]["p"] )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["b"]["n"]["p"]["s"].getValue(), "hello" )
def testPromoteDynamicColorPlugAndSerialise( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["c"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.PlugAlgo.promote( s["b"]["n"]["c"] )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( isinstance( s["b"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( s["b"]["n"]["c"].getInput().isSame( s["b"]["c"] ) )
def testPromoteNonDynamicColorPlugAndSerialise( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Random()
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["baseColor"] )
p.setValue( imath.Color3f( 1, 2, 3 ) )
p.setName( "c" )
self.assertTrue( isinstance( s["b"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( s["b"]["n"]["baseColor"].getInput().isSame( s["b"]["c"] ) )
self.assertTrue( s["b"]["n"]["baseColor"]["r"].getInput().isSame( s["b"]["c"]["r"] ) )
self.assertTrue( s["b"]["n"]["baseColor"]["g"].getInput().isSame( s["b"]["c"]["g"] ) )
self.assertTrue( s["b"]["n"]["baseColor"]["b"].getInput().isSame( s["b"]["c"]["b"] ) )
self.assertEqual( s["b"]["c"].getValue(), imath.Color3f( 1, 2, 3 ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( isinstance( s2["b"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( s2["b"]["n"]["baseColor"].getInput().isSame( s2["b"]["c"] ) )
self.assertTrue( s2["b"]["n"]["baseColor"]["r"].getInput().isSame( s2["b"]["c"]["r"] ) )
self.assertTrue( s2["b"]["n"]["baseColor"]["g"].getInput().isSame( s2["b"]["c"]["g"] ) )
self.assertTrue( s2["b"]["n"]["baseColor"]["b"].getInput().isSame( s2["b"]["c"]["b"] ) )
self.assertEqual( s2["b"]["c"].getValue(), imath.Color3f( 1, 2, 3 ) )
def testCantPromoteNonSerialisablePlugs( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["p"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default & ~Gaffer.Plug.Flags.Serialisable )
self.assertEqual( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["p"] ), False )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["p"] )
def testUnpromoting( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n1"] = GafferTest.AddNode()
p = Gaffer.PlugAlgo.promote( s["b"]["n1"]["op1"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n1"]["op1"] ) )
self.assertTrue( p.node().isSame( s["b"] ) )
Gaffer.PlugAlgo.unpromote( s["b"]["n1"]["op1"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n1"]["op1"] ) )
self.assertTrue( p.node() is None )
def testColorUnpromoting( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["c"] = Gaffer.Color3fPlug()
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["c"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node().isSame( s["b"] ) )
Gaffer.PlugAlgo.unpromote( s["b"]["n"]["c"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node() is None )
def testIncrementalUnpromoting( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["c"] = Gaffer.Color3fPlug()
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["c"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node().isSame( s["b"] ) )
Gaffer.PlugAlgo.unpromote( s["b"]["n"]["c"]["r"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node().isSame( s["b"] ) )
Gaffer.PlugAlgo.unpromote( s["b"]["n"]["c"]["g"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node().isSame( s["b"] ) )
Gaffer.PlugAlgo.unpromote( s["b"]["n"]["c"]["b"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["r"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["g"] ) )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( s["b"]["n"]["c"]["b"] ) )
self.assertTrue( p.node() is None )
def testCantPromoteReadOnlyPlug( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["i"] = Gaffer.IntPlug()
s["b"]["n"]["c"] = Gaffer.Color3fPlug()
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["i"] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"]["r"] ) )
s["b"]["n"]["i"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
s["b"]["n"]["c"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
s["b"]["n"]["c"]["r"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["i"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"]["r"] ) )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["i"] )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["c"] )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["c"]["r"] )
k = s["b"].keys()
uk = s["b"]["user"].keys()
try :
Gaffer.PlugAlgo.promote( s["b"]["n"]["i"] )
		except Exception as e :
self.assertTrue( "Cannot promote" in str( e ) )
self.assertTrue( "read only" in str( e ) )
self.assertEqual( s["b"].keys(), k )
self.assertEqual( s["b"]["user"].keys(), uk )
def testCantPromotePlugWithReadOnlyChildren( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["c"] = Gaffer.Color3fPlug()
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"]["r"] ) )
s["b"]["n"]["c"]["r"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( s["b"]["n"]["c"]["r"] ) )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["c"] )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, s["b"]["n"]["c"]["r"] )
k = s["b"].keys()
uk = s["b"]["user"].keys()
try :
Gaffer.PlugAlgo.promote( s["b"]["n"]["c"] )
		except Exception as e :
self.assertTrue( "Cannot promote" in str( e ) )
self.assertTrue( "read only" in str( e ) )
self.assertEqual( s["b"].keys(), k )
self.assertEqual( s["b"]["user"].keys(), uk )
def testMakePlugReadOnlyAfterPromoting( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.AddNode()
s["b"]["n"]["op1"].setValue( 0 )
s["b"]["n"]["op2"].setValue( 0 )
self.assertEqual( s["b"]["n"]["sum"].getValue(), 0 )
op1 = Gaffer.PlugAlgo.promote( s["b"]["n"]["op1"] )
s["b"]["n"]["op1"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
op1.setValue( 1 )
self.assertEqual( s["b"]["n"]["sum"].getValue(), 1 )
def testPromoteOutputPlug( self ) :
b = Gaffer.Box()
b["n"] = GafferTest.AddNode()
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["sum"] ) )
sum = Gaffer.PlugAlgo.promote( b["n"]["sum"] )
self.assertTrue( b.isAncestorOf( sum ) )
self.assertTrue( sum.direction() == Gaffer.Plug.Direction.Out )
self.assertEqual( sum.getInput(), b["n"]["sum"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["n"]["sum"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( b["n"]["sum"] ) )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, b["n"]["sum"] )
b["n"]["op1"].setValue( 10 )
b["n"]["op2"].setValue( 12 )
self.assertEqual( sum.getValue(), 22 )
Gaffer.PlugAlgo.unpromote( b["n"]["sum"] )
self.assertFalse( Gaffer.PlugAlgo.isPromoted( b["n"]["sum"] ) )
self.assertTrue( sum.parent() is None )
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["sum"] ) )
def testPromoteDynamicBoxPlugAndSerialise( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["p"] = Gaffer.Box2iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["p"] )
p.setValue( imath.Box2i( imath.V2i( 1, 2 ), imath.V2i( 3, 4 ) ) )
p.setName( "c" )
self.assertTrue( isinstance( s["b"]["c"], Gaffer.Box2iPlug ) )
self.assertTrue( s["b"]["n"]["p"].getInput().isSame( s["b"]["c"] ) )
self.assertTrue( s["b"]["n"]["p"]["min"].getInput().isSame( s["b"]["c"]["min"] ) )
self.assertTrue( s["b"]["n"]["p"]["max"].getInput().isSame( s["b"]["c"]["max"] ) )
self.assertEqual( s["b"]["c"].getValue(), imath.Box2i( imath.V2i( 1, 2 ), imath.V2i( 3, 4 ) ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( isinstance( s2["b"]["c"], Gaffer.Box2iPlug ) )
self.assertTrue( s2["b"]["n"]["p"].getInput().isSame( s2["b"]["c"] ) )
self.assertTrue( s2["b"]["n"]["p"]["min"].getInput().isSame( s2["b"]["c"]["min"] ) )
self.assertTrue( s2["b"]["n"]["p"]["max"].getInput().isSame( s2["b"]["c"]["max"] ) )
self.assertEqual( s2["b"]["c"].getValue(), imath.Box2i( imath.V2i( 1, 2 ), imath.V2i( 3, 4 ) ) )
def testPromoteStaticPlugsWithChildren( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.CompoundPlugNode()
s["b"]["n"]["valuePlug"]["i"].setValue( 10 )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["valuePlug"] )
p.setName( "p" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["b"]["n"]["valuePlug"]["i"].getValue(), 10 )
self.assertTrue( s2["b"]["n"]["valuePlug"]["i"].getInput().isSame( s2["b"]["p"]["i"] ) )
def testPromoteDynamicPlugsWithChildren( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["user"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["p"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["p"]["p"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["v"] = Gaffer.ValuePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["v"]["v"] = Gaffer.ValuePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["v"]["v"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["user"]["p"] )
p.setName( "p" )
p["p"]["i"].setValue( 10 )
v = Gaffer.PlugAlgo.promote( s["b"]["n"]["user"]["v"] )
v.setName( "v" )
v["v"]["i"].setValue( 20 )
def assertValid( script ) :
self.assertEqual( script["b"]["n"]["user"]["p"]["p"]["i"].getValue(), 10 )
self.assertTrue( script["b"]["n"]["user"]["p"]["p"]["i"].getInput().isSame( script["b"]["p"]["p"]["i"] ) )
self.assertTrue( script["b"]["n"]["user"]["p"]["p"].getInput().isSame( script["b"]["p"]["p"] ) )
self.assertTrue( script["b"]["n"]["user"]["p"].getInput().isSame( script["b"]["p"] ) )
self.assertEqual( script["b"]["n"]["user"]["v"]["v"]["i"].getValue(), 20 )
self.assertTrue( script["b"]["n"]["user"]["v"]["v"]["i"].getInput().isSame( script["b"]["v"]["v"]["i"] ) )
self.assertTrue( script["b"]["n"]["user"]["v"]["v"].getInput().isSame( script["b"]["v"]["v"] ) )
self.assertTrue( script["b"]["n"]["user"]["v"].getInput().isSame( script["b"]["v"] ) )
assertValid( s )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
assertValid( s2 )
def testPromoteArrayPlug( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.ArrayPlugNode()
p = Gaffer.PlugAlgo.promote( ( s["b"]["n"]["in"] ) )
p.setName( "p" )
s["b"]["p"][0].setInput( s["a"]["sum"] )
s["b"]["p"][1].setInput( s["a"]["sum"] )
self.assertEqual( len( s["b"]["n"]["in"] ), 3 )
self.assertTrue( s["b"]["n"]["in"].getInput().isSame( s["b"]["p"] ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["b"]["n"]["in"] ), 3 )
self.assertTrue( s2["b"]["n"]["in"].getInput().isSame( s2["b"]["p"] ) )
def testPromotionIncludesArbitraryMetadata( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["user"]["p"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.Metadata.registerValue( s["b"]["n"]["user"]["p"], "testInt", 10 )
Gaffer.Metadata.registerValue( s["b"]["n"]["user"]["p"], "testString", "test" )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["user"]["p"] )
p.setName( "p" )
self.assertEqual( Gaffer.Metadata.value( p, "testInt" ), 10 )
self.assertEqual( Gaffer.Metadata.value( p, "testString" ), "test" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s2["b"]["p"], "testInt" ), 10 )
self.assertEqual( Gaffer.Metadata.value( s2["b"]["p"], "testString" ), "test" )
def testPromotionIncludesArbitraryChildMetadata( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"]["user"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["b"]["n"]["user"]["p"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
Gaffer.Metadata.registerValue( s["b"]["n"]["user"]["p"], "testInt", 10 )
Gaffer.Metadata.registerValue( s["b"]["n"]["user"]["p"]["i"], "testString", "test" )
p = Gaffer.PlugAlgo.promote( s["b"]["n"]["user"]["p"] )
p.setName( "p" )
self.assertEqual( Gaffer.Metadata.value( p, "testInt" ), 10 )
self.assertEqual( Gaffer.Metadata.value( p["i"], "testString" ), "test" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.value( s2["b"]["p"], "testInt" ), 10 )
self.assertEqual( Gaffer.Metadata.value( s2["b"]["p"]["i"], "testString" ), "test" )
def testPromoteToNonBoxParent( self ) :
n = Gaffer.Node()
n["n"] = GafferTest.AddNode()
self.assertTrue( Gaffer.PlugAlgo.canPromote( n["n"]["op1"] ) )
p = Gaffer.PlugAlgo.promote( n["n"]["op1"] )
self.assertTrue( p.isSame( n["op1"] ) )
self.assertTrue( n["n"]["op1"].getInput().isSame( n["op1"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( n["n"]["op1"] ) )
self.assertFalse( n["op1"].getFlags( Gaffer.Plug.Flags.Dynamic ) )
Gaffer.PlugAlgo.unpromote( n["n"]["op1"] )
self.assertTrue( "op1" not in "n" )
self.assertTrue( n["n"]["op1"].getInput() is None )
def testPromotionParent( self ) :
n1 = Gaffer.Node()
n1["n"] = GafferTest.AddNode()
n2 = Gaffer.Node()
self.assertTrue( Gaffer.PlugAlgo.canPromote( n1["n"]["op1"], parent = n1["user"] ) )
self.assertFalse( Gaffer.PlugAlgo.canPromote( n1["n"]["op1"], parent = n2["user"] ) )
self.assertRaises( RuntimeError, Gaffer.PlugAlgo.promote, n1["n"]["op1"], parent = n2["user"] )
p = Gaffer.PlugAlgo.promote( n1["n"]["op1"], parent = n1["user"] )
self.assertTrue( p.parent().isSame( n1["user"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( n1["n"]["op1"] ) )
def testPromotionExcludingMetadata( self ) :
n = Gaffer.Node()
n["a"] = GafferTest.AddNode()
Gaffer.Metadata.registerValue( n["a"]["op1"], "test", "testValue" )
Gaffer.Metadata.registerValue( n["a"]["op2"], "test", "testValue" )
p1 = Gaffer.PlugAlgo.promote( n["a"]["op1"] )
self.assertEqual( Gaffer.Metadata.value( p1, "test" ), "testValue" )
p2 = Gaffer.PlugAlgo.promote( n["a"]["op2"], excludeMetadata = "*" )
self.assertEqual( Gaffer.Metadata.value( p2, "test" ), None )
def testPromotedNonBoxMetadataIsNonPersistent( self ) :
n = Gaffer.Node()
n["a"] = GafferTest.AddNode()
Gaffer.Metadata.registerValue( n["a"]["op1"], "testPersistence", 10 )
p = Gaffer.PlugAlgo.promote( n["a"]["op1"] )
self.assertEqual( Gaffer.Metadata.value( p, "testPersistence" ), 10 )
self.assertTrue( "testPersistence" in Gaffer.Metadata.registeredValues( p ) )
self.assertTrue( "testPersistence" not in Gaffer.Metadata.registeredValues( p, persistentOnly = True ) )
def testPromoteWithName( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n1"] = GafferTest.AddNode()
p = Gaffer.PlugAlgo.promoteWithName( s["b"]["n1"]["op1"], 'newName' )
self.assertEqual( p.getName(), 'newName' )
def testPromotePlugWithDescendantValues( self ) :
n = Gaffer.Node()
n["a"] = Gaffer.Node()
n["a"]["p"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["a"]["p"]["c"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["a"]["p"]["c"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["a"]["p"]["c"]["v"] = Gaffer.V3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["a"]["p"]["c"]["i"].setValue( 10 )
n["a"]["p"]["c"]["v"].setValue( imath.V3f( 1, 2, 3 ) )
p = Gaffer.PlugAlgo.promote( n["a"]["p"] )
self.assertEqual( n["a"]["p"]["c"]["i"].getValue(), 10 )
self.assertEqual( n["a"]["p"]["c"]["v"].getValue(), imath.V3f( 1, 2, 3 ) )
def testPromoteNonSerialisableOutput( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["a"] = GafferTest.AddNode()
s["b"]["a"]["sum"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
Gaffer.PlugAlgo.promote( s["b"]["a"]["sum"] )
self.assertTrue( s["b"]["sum"].getInput().isSame( s["b"]["a"]["sum"] ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( s2["b"]["sum"].getInput().isSame( s2["b"]["a"]["sum"] ) )
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Joy
EPS = 0.22
SPEED_FACTOR = 500
class teleJoy(object):
def __init__(self):
self.joy = rospy.Subscriber('/joy', Joy, self.__callback_func)
self.pub = rospy.Publisher('/bot_vel', Float32MultiArray, queue_size=1)
self.vels = [0.0, 0.0, 0.0, 0.0] #x, y, w, stop
self.mode = 0
print('Initialized')
def __mode(self, mode_truth):
"""This function can be used to select between different
        modes of driving. Assign any button to any action you want
        (see the commented sketch after this method).
        """
pass
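    # Hedged sketch of how __mode could be wired up (commented out on purpose;
    # the button indices and the meaning of the modes are assumptions, not
    # something this node defines):
    #
    # def __mode(self, mode_truth):
    #     if mode_truth[4]:        # e.g. left bumper -> half speed
    #         self.mode = 1
    #     elif mode_truth[5]:      # e.g. right bumper -> normal speed
    #         self.mode = 0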
def __callback_func(self, msg):
""" Function to get called every time a new joystick msg
is received.
Args:
            msg (Joy): ROS joystick message containing axes and buttons.
"""
if (abs(msg.axes[1]) > EPS):
self.vels[0] = msg.axes[1] * SPEED_FACTOR
else: self.vels[0] = 0.0
if (abs(msg.axes[0]) > EPS):
self.vels[1] = msg.axes[0] * SPEED_FACTOR
else: self.vels[1] = 0.0
if (abs(msg.axes[3]) > EPS):
self.vels[2] = msg.axes[3] * SPEED_FACTOR
else: self.vels[2] = 0.0
if msg.buttons[0] == 1: # stop button
self.vels[3] = (self.vels[3] + 1) % 2
arr_msg = Float32MultiArray()
arr_msg.data = self.vels
rospy.sleep(0.001)
self.pub.publish(arr_msg)
        rospy.loginfo('Publishing velocities: {}'.format(self.vels))
def __arr_pub(self, arr_msg):
""" If there are more than one topic to publish, define a
function to publish in order to make code cleaner.
"""
pass
if __name__ == "__main__":
rospy.init_node("bot_teleop_control")
joy_ctrl = teleJoy()
rospy.spin()
|
import math
import sys
file = 'input.txt'
# For using test input instead of file input comment out following line
# file = 'input_example.txt'
with open(file) as f:
t = f.read() # read complete file, results string of lines with endings
print(' -------------- part 1: -------------- ')
print(' -------------- read in tiles --------------')
def revstr(s):
return ''.join(reversed(s)) # Reversed returns reversed iterator. Join its results to empty string.
tilesCount = 0
tilesBorder = dict()
tilesPic = dict()
for tile in t.split('\n\n'): # datasets separated by empty lines, change into list, lines still with endings
tile = tile.split('\n') # lines in tile
tile = [line.rstrip('\n') for line in tile] # each line without return
# get name and tile description
name = tile[0][-5:-1] # first line contains tile name ("Tile 3557:")
tilePic = tile[1:] # the other lines contain the tile
# get four strings of the border characters, borders read clockwise
top = tilePic[0]
bottom = ''.join(reversed(tilePic[-1])) # last line in reversed order
left = ''.join(reversed([line[0] for line in tilePic])) # first column in reversed order
right = ''.join([line[-1] for line in tilePic]) # last column
# store results
tilesCount += 1
tilesBorder[name] = (top, right, bottom, left)
tilesPic[name] = tilePic
tilesNoInLine = int(math.sqrt(tilesCount))
print("tileDim", tilesNoInLine)
print()
print('--- configure tiles in the grid (position, rotation, flip) so that overlapping borders are consistent ----')
# for each tile in the grid, store name, rotation, flip and resulting border in dictionaries
tileNameAtPos = dict()
rotAtPos = dict()
flipAtPos = dict()
tileBorderAtPos = dict()
# for a given partial configuration up to previous tile, continue with next tile at tilePos
# with the tiles in tileNames
def configTiles(tilePos, tileNames):
    posY, posX = tilePos // tilesNoInLine, tilePos % tilesNoInLine # y/x of tile in the grid
for tileName in tileNames: # at that position, try each of the given tiles
t1, r1, b1, l1 = tilesBorder[tileName]
for rot in range(4): # try every possible rotation
if rot > 0:
t1, r1, b1, l1 = l1, t1, r1, b1 # rotate border once
for flip in range(2): # try both without and with vertical flip
if flip > 0 or rot > 0:
t1, r1, b1, l1 = revstr(b1), revstr(r1), revstr(t1), revstr(l1) # vertical flip border once
                if posX != 0: # if there is a left neighbor in the grid
t2, r2, b2, l2 = tileBorderAtPos[tilePos - 1]
                    if r2 != revstr(l1): # check if its right border fits my left border, both top to bottom
ok = False
continue
                if posY != 0: # if there is a neighbor above in the grid
t2, r2, b2, l2 = tileBorderAtPos[tilePos - tilesNoInLine]
                    if revstr(b2) != t1: # check if its bottom border fits my top border, both left to right
ok = False
continue
# current tile configuration fits, store it to position
tileNameAtPos[tilePos] = tileName
tileBorderAtPos[tilePos] = (t1, r1, b1, l1)
rotAtPos[tilePos] = rot
flipAtPos[tilePos] = flip
if tilePos == tilesCount - 1: # no further tiles to fit in: solution found
tl, tr, br, bl = tileNameAtPos[0], \
tileNameAtPos[tilesNoInLine - 1], \
tileNameAtPos[tilesCount - tilesNoInLine], \
tileNameAtPos[tilesCount - 1]
print(tl, tr, br, bl)
print("res:", int(tl) * int(tr) * int(br) * int(bl))
return True
else: # further tiles to fit in, try recursively further configuration
ok = configTiles(tilePos + 1, [n for n in tileNames if n != tileName])
if ok: # return from found complete configuration
return ok
return False
configTiles(0, list(tilesBorder))
print(tileNameAtPos)
# 51214443014783
print()
print('---------------- part 2 ------------------')
tilesSize = len(tilesBorder[list(tilesBorder.keys())[0]][0]) # len(top(first tile))
print("tilesSize", tilesSize)
print('--------- create full picture of the consistent configuration found in part 1 --------')
picture = dict()
for by in range(tilesNoInLine):
for bx in range(tilesNoInLine):
tilePos = by * tilesNoInLine + bx
tileName = tileNameAtPos[tilePos]
tileRot = rotAtPos[tilePos]
tileFlip = flipAtPos[tilePos]
tile = tilesPic[tileName]
for y in range(tilesSize - 2): # two less, because top and bottom rows are left out
for x in range(tilesSize - 2): # two less, because left and right columns are left out
cx, cy = x + 1, y + 1 # access positions for leaving out top row and left column
if tileFlip:
cx, cy = cx, (tilesSize - 1) - cy # to flip tile vertically, flip get coordinates vertically
for r in range(tileRot):
cx, cy = (tilesSize - 1) - cy, cx # to rotate tile clockwise, rotate get coordinates ccw
picture[(by * (tilesSize - 2) + y), (bx * (tilesSize - 2) + x)] = tile[cy][cx] # copy pixel
picDim = tilesNoInLine * (tilesSize - 2) # we left out two border pixels per tile, and have tilesNoInLine tiles per row
def testPrint(picture, picDim):
for y in range(picDim):
s = ""
for x in range(picDim):
s = s + picture[y, x]
print(s)
#testPrint(picture, picDim)
print('--------- find monsters --------')
# ------------------
monsterStrings = [
    "                  # ",
    "#    ##    ##    ###",
    " #  #  #  #  #  #   "]
monsterHeight, monsterWidth = len(monsterStrings), len(monsterStrings[0])
monsterPosList = [ (y, x) for y in range(3) for x in range(20) if monsterStrings[y][x] == "#" ]
print(monsterPosList)
# try all combinations of rotation and with/without vertical flipping
# of the picture to find any monsters
for rot in range(4):
for flip in range(2):
rotPic = dict()
for px in range(picDim):
for py in range(picDim):
cx, cy = px, py
if flip:
cx, cy = cx, (picDim - 1) - cy # to flip tile vertically, flip get coordinates vertically
for r in range(rot):
cx, cy = (picDim - 1) - cy, cx # to rotate tile clockwise, rotate get coordinates ccw
rotPic[(py, px)] = picture[(cy, cx)]
# search monster in positions in picture, where its dimensions would fit in
monsterArea = set()
for px in range(picDim - monsterWidth):
for py in range(picDim - monsterHeight):
ok = True
for my, mx in monsterPosList:
if rotPic[(py + my, px + mx)] != "#":
ok = False # no monster here, if expected monster pixel is missing
break
if not ok:
continue
print("monster at", py, px)
for my, mx in monsterPosList:
monsterArea.add((py + my, px + mx))
if len(monsterArea) > 0:
print("flip and rot:", flip, rot)
testPrint(rotPic, picDim)
print("monster area", len(monsterArea))
hashArea = len([ (x, y) for x in range(picDim) for y in range(picDim) if rotPic[(y, x)] == "#"])
print("rest", hashArea - len(monsterArea))
sys.exit()
# rest 2065
|
import logging
from algs_wrapper.base import Base
logger = logging.getLogger(__name__)
class VPCC(Base):
def __init__(self):
super().__init__()
def make_encode_cmd(self, in_pcfile, bin_file):
cmd = [
self._algs_cfg['encoder'],
f'--uncompressedDataPath={in_pcfile}',
f'--compressedStreamPath={bin_file}',
'--configurationFolder=cfg/',
'--config=cfg/common/ctc-common.cfg',
f'--config={self._algs_cfg["condition_cfg"]}',
f'--config={self._algs_cfg[self.rate]["rate_cfg"]}',
f'--videoEncoderOccupancyPath={self._algs_cfg["videoEncoder"]}',
f'--videoEncoderGeometryPath={self._algs_cfg["videoEncoder"]}',
f'--videoEncoderAttributePath={self._algs_cfg["videoEncoder"]}',
'--frameCount=1',
'--computeMetrics=0',
'--computeChecksum=0'
]
try:
assert self._has_color
except AssertionError as e:
logger.error(
"V-PCC only supports point cloud with color, please check the "
"input point cloud."
)
raise e
return cmd
def make_decode_cmd(self, bin_file, out_pcfile):
cmd = [
self._algs_cfg['decoder'],
f'--compressedStreamPath={bin_file}',
f'--reconstructedDataPath={out_pcfile}',
f'--videoDecoderOccupancyPath={self._algs_cfg["videoDecoder"]}',
f'--videoDecoderGeometryPath={self._algs_cfg["videoDecoder"]}',
f'--videoDecoderAttributePath={self._algs_cfg["videoDecoder"]}',
f'--inverseColorSpaceConversionConfig={self._algs_cfg["inverseColorSpaceConversionConfig"]}',
'--computeMetrics=0',
'--computeChecksum=0'
]
return cmd
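# Usage sketch (illustrative only; assumes the surrounding algs_wrapper config
# machinery has populated self._algs_cfg, self.rate and self._has_color, and
# the .ply/.bin paths below are hypothetical):
#
#   codec = VPCC()
#   enc_cmd = codec.make_encode_cmd('frame.ply', 'frame.bin')
#   dec_cmd = codec.make_decode_cmd('frame.bin', 'frame_rec.ply')
#   # each command is an argv list suitable for subprocess.run(cmd, check=True)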
|
import glob
import os
SOUNDSCAPES_FOLDER = "../data/Analysis/" # input
OUTPUT_FILE = "n_events.txt"
output = []
for soundscape_jam in glob.glob(SOUNDSCAPES_FOLDER + "*.jams"):
soundscape_name = str(os.path.basename(soundscape_jam).split(".")[0])
n_events = "err"
with open(soundscape_jam) as jams_file: # scan for a line like: "n_events": 14,
for line in jams_file.readlines():
index = line.find("n_events")
if index > -1:
n_events = line[index + 11: index + 13] # skip 'n_events": ' (11 chars) and take the two-digit count
break
output.append(soundscape_name + " " + n_events)
with open(OUTPUT_FILE, 'w', newline="") as file:
print("\r\n".join(output), file=file)
|
"""
Protocol Constants
"""
"""
Message Codes
The first byte of the frame payload contains the message code
"""
# TODO: Does not need a full byte to be transferred
REQUEST = 0
REQUEST_ACK = 1
ACK = 2
RESPONSE_INFO = 3
DATA = 4
REQUEST_FIN = 5
"""
IDs
For better performance:
Lower IDs take priority over higher IDs, so the identifier should
dynamically match the message priority.
This is ignored here for simplicity
"""
LOW_PRIO = 0xffffffff
# Navigation
NAV_HMI_RESP = 0x000001
NAV_HMI_REQ = 0x000011
# air conditioning
AC_HMI_RESP = 0x000002
AC_HMI_REQ = 0x000021
# cd player
CDP_HMI_RESP = 0x000003
CDP_HMI_REQ = 0x000031
"""
Request Codes
"""
# Navigation
NAV_REQ_DESTINATION = 10
# Air conditioning
AC_REQ_TEMPERATURE = 20
# cd player
CDP_REQ_TRACK_NO = 30
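# Illustrative only (not part of the protocol definition above): the message
# code occupies the first byte of the frame payload, so a request payload can,
# for example, be packed as a code byte followed by a request byte. How the ID
# is carried is transport-specific and not shown here.
#
#   import struct
#   payload = struct.pack("BB", REQUEST, NAV_REQ_DESTINATION)
#   code, req = struct.unpack("BB", payload)
#   assert (code, req) == (REQUEST, NAV_REQ_DESTINATION)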
|
import json
from typing import Optional
import pytest
from requests import Response
from fidesops.schemas.saas.saas_config import SaaSRequest
from fidesops.schemas.saas.shared_schemas import SaaSRequestParams, HTTPMethod
from fidesops.common_exceptions import FidesopsException
from fidesops.schemas.saas.strategy_configuration import (
OffsetPaginationConfiguration,
)
from fidesops.service.pagination.pagination_strategy_offset import (
OffsetPaginationStrategy,
)
@pytest.fixture(scope="function")
def response_with_body():
response = Response()
response._content = bytes(
json.dumps({"conversations": [{"id": 1}, {"id": 2}, {"id": 3}]}),
"utf-8",
)
return response
def test_offset(response_with_body):
config = OffsetPaginationConfiguration(
incremental_param="page", increment_by=1, limit=10
)
request_params: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 1},
)
paginator = OffsetPaginationStrategy(config)
next_request: Optional[SaaSRequestParams] = paginator.get_next_request(
request_params, {}, response_with_body, "conversations"
)
assert next_request == SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 2},
)
def test_offset_with_connector_param_reference(response_with_body):
config = OffsetPaginationConfiguration(
incremental_param="page",
increment_by=1,
limit={"connector_param": "limit"},
)
connector_params = {"limit": 10}
request_params: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 1},
)
paginator = OffsetPaginationStrategy(config)
next_request: Optional[SaaSRequestParams] = paginator.get_next_request(
request_params, connector_params, response_with_body, "conversations"
)
assert next_request == SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 2},
)
def test_offset_with_connector_param_reference_not_found(response_with_body):
config = OffsetPaginationConfiguration(
incremental_param="page",
increment_by=1,
limit={"connector_param": "limit"},
)
request_params: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 1},
)
paginator = OffsetPaginationStrategy(config)
with pytest.raises(FidesopsException) as exc:
paginator.get_next_request(
request_params, {}, response_with_body, "conversations"
)
assert (
f"Unable to find value for 'limit' with the connector_param reference '{config.limit.connector_param}'"
== str(exc.value)
)
def test_offset_limit(response_with_body):
config = OffsetPaginationConfiguration(
incremental_param="page", increment_by=1, limit=10
)
request_params: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"page": 10},
)
paginator = OffsetPaginationStrategy(config)
next_request: SaaSRequestParams = paginator.get_next_request(
request_params, {}, response_with_body, "conversations"
)
assert next_request is None
def test_offset_increment_by_zero():
with pytest.raises(ValueError) as exc:
OffsetPaginationConfiguration(
incremental_param="page", increment_by=0, limit=10
)
assert f"'increment_by' cannot be zero" in str(exc.value)
def test_offset_increment_by_negative():
with pytest.raises(ValueError) as exc:
OffsetPaginationConfiguration(
incremental_param="page", increment_by=-1, limit=10
)
assert f"'increment_by' cannot be negative" in str(exc.value)
def test_offset_missing_start_value(response_with_body):
config = OffsetPaginationConfiguration(
incremental_param="page", increment_by=1, limit=10
)
request_params: SaaSRequestParams = SaaSRequestParams(
method=HTTPMethod.GET,
path="/conversations",
query_params={"row": 1},
)
paginator = OffsetPaginationStrategy(config)
with pytest.raises(FidesopsException) as exc:
paginator.get_next_request(
request_params, {}, response_with_body, "conversations"
)
assert (
f"Unable to find query param named '{config.incremental_param}' in request"
== str(exc.value)
)
def test_validate_request():
query_params = [{"name": "page", "value": 1}]
pagination = {
"strategy": "offset",
"configuration": {
"incremental_param": "page",
"increment_by": 1,
"limit": 10,
},
}
SaaSRequest(method="GET", path="/test", query_params=query_params, pagination=pagination)
def test_validate_request_missing_param():
query_params = [{"name": "row", "value": 1}]
pagination = {
"strategy": "offset",
"configuration": {
"incremental_param": "page",
"increment_by": 1,
"limit": 10,
},
}
with pytest.raises(ValueError) as exc:
SaaSRequest(method="GET", path="/test", query_params=query_params, pagination=pagination)
assert "Query param 'page' not found." in str(exc.value)
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import time
from selenium import webdriver
from bs4 import BeautifulSoup
import codecs
from selenium.common.exceptions import NoSuchElementException
import os
import os.path
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
chrome_options = Options()
chrome_options.add_experimental_option( "prefs",{'profile.managed_default_content_settings.javascript': 2})
driver = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
wait = WebDriverWait(driver, 10)
# The file containing the links should be in the working directory or make sure that the command has the correct path.
with open("urls.txt") as f:
urls = f.readlines()
# Check if a directory named "html" exists; if not, create it.
newpath = r'html'
if not os.path.exists(newpath):
os.makedirs(newpath)
driver.get("http://www.dianping.com/search/keyword/58/0_%E6%B1%89%E5%A0%A1")
Print("Diangping recently changed the architecture of their website. The old web site looks like Yelp while the new website looks more like Google Maps. Please review website before continuing")
conti = input("Press 1 if page is new format (Looks like Google Maps), 2 if page is old style (Looks like Yelp), 0 to quit ")
if conti==0:
driver.quit()
quit()
elif conti == 2:
snext = "next"
elif conti == 1:
snext = "NextPage"
# function checks if there is more than one page with results
def checkpages(driver):
try:
driver.find_element_by_class_name(snext)
return True
except NoSuchElementException:
return False
# function saves every html page and names the files in numerical order
def htmlwriter():
n = len([name for name in os.listdir('.') if os.path.isfile(name)])
n1 = "%04d"%n
html = driver.page_source
html_file = codecs.open(str(n1) +".html", "w", "utf-8")
print "retrieved " +str(n1)
print driver.current_url
html_file.write(html)
html_file.close()
for item in urls:
driver.get(item)
time.sleep(10)
htmlwriter()
while checkpages(driver)==True:
try:
time.sleep(10)
element = driver.find_element_by_class_name(snext)
element.click()
except NoSuchElementException:
htmlwriter()
break
time.sleep(2)
htmlwriter()
driver.quit()
|
import asyncio
from chunk import Chunk
from datetime import datetime
import pickle
from typing import Callable, Dict
import numpy as np
import open3d as o3d
from .message import BaseMessage, ResponseMessage, MeshesMessage, DeformableMeshesMessage
class MeshChunksHandler:
""" A middleware to merge chunks into its MeshesMessage
Params
------
next_handler: the method that deals with the reconstructed, fully
received MeshesMessage, or with any other message
"""
def __init__(self, next_handler: Callable[[BaseMessage], None]):
self.mesh_name_2_msg: Dict[str, MeshesMessage] = {}
self.mesh_name_2_chunks: Dict[str, Chunk] = {}
self.handler = next_handler
def __call__(self, msg: BaseMessage):
if isinstance(msg, MeshesMessage):
self.mesh_name_2_msg[msg.mesh_name] = msg
elif isinstance(msg, MeshesMessage.Chunk):
name = msg.mesh_name
if name not in self.mesh_name_2_chunks:
self.mesh_name_2_chunks[name] = [msg]
else:
self.mesh_name_2_chunks[name].append(msg)
# check whether all the chunks have been collected
if name in self.mesh_name_2_msg and \
len(self.mesh_name_2_chunks[name]) == self.mesh_name_2_msg[name].chunk_num:
self.mesh_name_2_chunks[name].sort(key = lambda x: x.chunk_id)
meshmsg = self.mesh_name_2_msg[name]
for chunk in self.mesh_name_2_chunks[name]:
meshmsg.chunks.append(chunk)
del self.mesh_name_2_chunks[name]
del self.mesh_name_2_msg[name]
if name.startswith("MPM::MESHES::"):
# an Open3D mesh reconstructed from a point cloud
pcdMessage: DeformableMeshesMessage = pickle.loads(meshmsg.mesh_file)
self.handler(pcdMessage)
else:
# a normal mesh
self.handler(meshmsg)
else:
self.handler(msg)
class AsyncServer:
""" An async server
Params
------
message_handler: a callback to handle the incoming message
**NOTE** You don't need to care about the chunks of a mesh
file, because the MeshChunksHandler middleware above merges the
chunks into one message before invoking this callback
"""
def __init__(self, message_handler: Callable[[BaseMessage], None], logger:Callable[[str], None] = None) -> None:
self.message_handler = MeshChunksHandler(message_handler)
self.logger = logger if logger else (lambda s: print('[DEBUG]', datetime.now(), s))
async def _handle_incoming_request(self, reader: asyncio.StreamReader, writer):
""" When there is request coming from the client, the
method retrieve the message object from the request,
verify the message, respond to the client and finally
invoke the callback `message_handler` to deal with the
message
Params
------
reader: from which the request is to be read
writer: to which the response will be written
"""
bstr, done = b'', False
while not done:
line = await reader.read(1024)
bstr += line
done = len(line) < 1024
self.logger(f"{len(bstr)} bytes are read")
request, err = BaseMessage.unpack(bstr)
self.logger(f"No.{request.message_idx if request != None else 'Unknow'} message of type {type(request)} is decoded with error: {err}, preparing response")
if request is not None:
response = ResponseMessage(request.message_idx, err)
else:
response = ResponseMessage(0, err)
writer.write(pickle.dumps(response))
await writer.drain()
self.logger(f"No.{request.message_idx if request != None else 'Unknow'} message's response has been sent")
writer.close()
self.logger(f"connection terminated")
if not err:
self.message_handler(request)
async def run_server(self):
""" Start the server
Call asyncio.run(server.run_server()) in the main
to start.
"""
self.logger(f"starting server at 127.0.0.1:{BaseMessage.port}")
server = await asyncio.start_server(
self._handle_incoming_request,
'127.0.0.1',
BaseMessage.port
)
async with server:
await server.serve_forever()
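# Usage sketch based on the run_server docstring above (the handler below is a
# placeholder; a real handler would dispatch on the concrete message type):
#
#   def handle(msg: BaseMessage) -> None:
#       print("received", type(msg).__name__)
#
#   asyncio.run(AsyncServer(handle).run_server())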
|
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains Batfish client commands that query the Batfish service."""
from __future__ import absolute_import, print_function
import base64
import json
import logging
import os
import tempfile
from typing import Any, Dict, List, Optional, Union # noqa: F401
import six
from requests import HTTPError
from pybatfish.client.consts import CoordConsts, WorkStatusCode
from pybatfish.client.diagnostics import (_upload_diagnostics,
_warn_on_snapshot_failure)
from pybatfish.datamodel.primitives import ( # noqa: F401
AutoCompleteSuggestion,
AutoCompletionType, Edge,
Interface)
from pybatfish.datamodel.referencelibrary import (NodeRoleDimension,
NodeRolesData, ReferenceBook,
ReferenceLibrary)
from pybatfish.exception import BatfishException
from pybatfish.settings.issues import IssueConfig # noqa: F401
from pybatfish.util import (BfJsonEncoder, get_uuid, validate_name, zip_dir)
from . import resthelper, restv2helper, workhelper
from .options import Options
from .session import Session
from .workhelper import (get_work_status,
kill_work)
# TODO: normally libraries don't configure logging in code
_bfDebug = True
bf_logger = logging.getLogger("pybatfish.client")
bf_session = Session(bf_logger)
if _bfDebug:
bf_logger.setLevel(logging.INFO)
bf_logger.addHandler(logging.StreamHandler())
else:
bf_logger.addHandler(logging.NullHandler())
__all__ = ['bf_add_analysis',
'bf_add_issue_config',
'bf_add_node_role_dimension',
'bf_add_reference_book',
'bf_auto_complete',
'bf_delete_analysis',
'bf_delete_issue_config',
'bf_delete_network',
'bf_delete_node_role_dimension',
'bf_delete_reference_book',
'bf_delete_snapshot',
'bf_extract_answer_summary',
'bf_fork_snapshot',
'bf_generate_dataplane',
'bf_get_analysis_answers',
'bf_get_answer',
'bf_get_info',
'bf_get_issue_config',
'bf_get_node_role_dimension',
'bf_get_node_roles',
'bf_get_reference_book',
'bf_get_reference_library',
'bf_get_snapshot_inferred_node_role_dimension',
'bf_get_snapshot_inferred_node_roles',
'bf_get_snapshot_node_role_dimension',
'bf_get_snapshot_node_roles',
'bf_get_work_status',
'bf_init_analysis',
'bf_init_snapshot',
'bf_kill_work',
'bf_list_analyses',
'bf_list_networks',
'bf_list_incomplete_works',
'bf_list_questions',
'bf_list_snapshots',
'bf_logger',
'bf_put_node_roles',
'bf_read_question_settings',
'bf_run_analysis',
'bf_session',
'bf_set_network',
'bf_set_snapshot',
'bf_upload_diagnostics',
'bf_write_question_settings']
def bf_add_analysis(analysisName, questionDirectory):
return _bf_init_or_add_analysis(analysisName, questionDirectory, False)
def bf_add_issue_config(issue_config):
# type: (IssueConfig) -> None
"""
Add or update the active network's configuration for an issue.
:param issue_config: The IssueConfig object to add or update
:type issue_config: :class:`pybatfish.settings.issues.IssueConfig`
"""
restv2helper.add_issue_config(bf_session, issue_config)
def bf_add_node_role_dimension(dimension):
# type: (NodeRoleDimension) -> None
"""
Adds another role dimension to the active network.
Individual roles within the dimension must have a valid (java) regex.
The node list within those roles, if present, is ignored by the server.
:param dimension: The NodeRoleDimension object for the dimension to add
:type dimension: :class:`pybatfish.datamodel.referencelibrary.NodeRoleDimension`
"""
if dimension.type == "AUTO":
raise ValueError("Cannot add a dimension of type AUTO")
restv2helper.add_node_role_dimension(bf_session, dimension)
def bf_add_reference_book(book):
# type: (ReferenceBook) -> None
"""
Adds another reference book to the active network.
:param book: The ReferenceBook object to add
:type book: :class:`pybatfish.datamodel.referencelibrary.ReferenceBook`
"""
restv2helper.add_reference_book(bf_session, book)
def bf_auto_complete(completionType, query, maxSuggestions=None):
# type: (AutoCompletionType, str, Optional[int]) -> List[AutoCompleteSuggestion]
"""Auto complete the partial query based on its type."""
jsonData = workhelper.get_data_auto_complete(bf_session, completionType,
query, maxSuggestions)
response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_AUTO_COMPLETE,
jsonData)
if CoordConsts.SVC_KEY_SUGGESTIONS in response:
suggestions = [AutoCompleteSuggestion.from_dict(json.loads(suggestion))
for suggestion in
response[CoordConsts.SVC_KEY_SUGGESTIONS]]
return suggestions
raise BatfishException("Unexpected response: {}.".format(response))
def bf_delete_analysis(analysisName):
jsonData = workhelper.get_data_delete_analysis(bf_session, analysisName)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_DEL_ANALYSIS,
jsonData)
return jsonResponse
def bf_delete_issue_config(major, minor):
# type: (str, str) -> None
"""Deletes the issue config for the active network."""
restv2helper.delete_issue_config(bf_session, major, minor)
def bf_delete_network(name):
# type: (str) -> None
"""
Delete network by name.
:param name: name of the network to delete
:type name: string
"""
if name is None:
raise ValueError('Network to be deleted must be supplied')
jsonData = workhelper.get_data_delete_network(bf_session, name)
resthelper.get_json_response(bf_session, CoordConsts.SVC_RSC_DEL_NETWORK,
jsonData)
def bf_delete_node_role_dimension(dimension):
# type: (str) -> None
"""Deletes the definition of the given role dimension for the active network."""
restv2helper.delete_node_role_dimension(bf_session, dimension)
def bf_delete_reference_book(book_name):
# type: (str) -> None
"""Deletes the reference book with the specified name for the active network."""
restv2helper.delete_reference_book(bf_session, book_name)
def bf_delete_snapshot(name):
# type: (str) -> None
"""
Delete named snapshot from current network.
:param name: name of the snapshot to delete
:type name: string
"""
_check_network()
if name is None:
raise ValueError('Snapshot to be deleted must be supplied')
json_data = workhelper.get_data_delete_snapshot(bf_session, name)
resthelper.get_json_response(bf_session, CoordConsts.SVC_RSC_DEL_SNAPSHOT,
json_data)
def bf_extract_answer_summary(answer_dict):
"""Get the answer for a previously asked question."""
if "status" not in answer_dict or answer_dict["status"] != "SUCCESS":
raise BatfishException("Question was not answered successfully")
if "summary" not in answer_dict:
raise BatfishException("Summary not found in the answer")
return answer_dict["summary"]
def bf_fork_snapshot(base_name, name=None, overwrite=False,
background=False, deactivate_interfaces=None,
deactivate_links=None, deactivate_nodes=None,
restore_interfaces=None, restore_links=None,
restore_nodes=None, add_files=None):
# type: (str, Optional[str], bool, bool, Optional[List[Interface]], Optional[List[Edge]], Optional[List[str]], Optional[List[Interface]], Optional[List[Edge]], Optional[List[str]], Optional[str]) -> Union[str, Dict, None]
"""Copy an existing snapshot and deactivate or reactivate specified interfaces, nodes, and links on the copy.
:param base_name: name of the snapshot to copy
:type base_name: string
:param name: name of the snapshot to initialize
:type name: string
:param overwrite: whether or not to overwrite an existing snapshot with the
same name
:type overwrite: bool
:param background: whether or not to run the task in the background
:type background: bool
:param deactivate_interfaces: list of interfaces to deactivate in new snapshot
:type deactivate_interfaces: list[Interface]
:param deactivate_links: list of links to deactivate in new snapshot
:type deactivate_links: list[Edge]
:param deactivate_nodes: list of names of nodes to deactivate in new snapshot
:type deactivate_nodes: list[str]
:param restore_interfaces: list of interfaces to reactivate
:type restore_interfaces: list[Interface]
:param restore_links: list of links to reactivate
:type restore_links: list[Edge]
:param restore_nodes: list of names of nodes to reactivate
:type restore_nodes: list[str]
:param add_files: path to zip file or directory containing files to add
:type add_files: str
:return: name of initialized snapshot, JSON dictionary of task status if
background=True, or None if the call fails
:rtype: Union[str, Dict, None]
"""
if bf_session.network is None:
raise ValueError('Network must be set to fork a snapshot.')
if name is None:
name = Options.default_snapshot_prefix + get_uuid()
validate_name(name)
if name in bf_list_snapshots():
if overwrite:
bf_delete_snapshot(name)
else:
raise ValueError(
'A snapshot named "{}" already exists in network "{}"'.format(
name, bf_session.network))
encoded_file = None
if add_files is not None:
file_to_send = add_files
if os.path.isdir(add_files):
temp_zip_file = tempfile.NamedTemporaryFile()
zip_dir(add_files, temp_zip_file)
file_to_send = temp_zip_file.name
if os.path.isfile(file_to_send):
with open(file_to_send, "rb") as f:
encoded_file = base64.b64encode(f.read()).decode('ascii')
json_data = {
"snapshotBase": base_name,
"snapshotNew": name,
"deactivateInterfaces": deactivate_interfaces,
"deactivateLinks": deactivate_links,
"deactivateNodes": deactivate_nodes,
"restoreInterfaces": restore_interfaces,
"restoreLinks": restore_links,
"restoreNodes": restore_nodes,
"zipFile": encoded_file
}
restv2helper.fork_snapshot(bf_session,
json_data)
return _parse_snapshot(name, background)
def bf_generate_dataplane(snapshot=None):
# type: (Optional[str]) -> str
"""Generates the data plane for the supplied snapshot. If no snapshot argument is given, uses the last snapshot initialized."""
snapshot = bf_session.get_snapshot(snapshot)
work_item = workhelper.get_workitem_generate_dataplane(bf_session, snapshot)
answer_dict = workhelper.execute(work_item, bf_session)
return str(answer_dict["status"].value)
def bf_get_analysis_answers(name, snapshot=None,
reference_snapshot=None):
# type: (str, str, Optional[str]) -> Any
"""Get the answers for a previously asked analysis."""
snapshot = bf_session.get_snapshot(snapshot)
json_data = workhelper.get_data_get_analysis_answers(
bf_session, name, snapshot, reference_snapshot)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_GET_ANALYSIS_ANSWERS, json_data)
answers_dict = json.loads(json_response['answers'])
return answers_dict
def bf_get_answer(questionName, snapshot, reference_snapshot=None):
# type: (str, str, Optional[str]) -> Any
"""
Get the answer for a previously asked question.
:param questionName: the unique identifier of the previously asked question
:param snapshot: the snapshot the question is run on
:param reference_snapshot: if present, the snapshot against which the answer
was computed differentially.
"""
jsonData = workhelper.get_data_get_answer(bf_session, questionName,
snapshot, reference_snapshot)
response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_GET_ANSWER,
jsonData)
answerJson = json.loads(response["answer"])
return answerJson
def bf_get_info():
jsonResponse = resthelper.get_json_response(bf_session, '', useHttpGet=True)
return jsonResponse
def bf_get_issue_config(major, minor):
# type: (str, str) -> IssueConfig
"""Returns the issue config for the active network."""
return IssueConfig.from_dict(
restv2helper.get_issue_config(bf_session, major, minor))
def bf_get_node_role_dimension(dimension):
# type: (str) -> NodeRoleDimension
"""Returns the definition of the given node role dimension for the active network."""
return NodeRoleDimension.from_dict(
restv2helper.get_node_role_dimension(bf_session, dimension))
def bf_get_node_roles():
# type: () -> NodeRolesData
"""Returns the definitions of node roles for the active network."""
return NodeRolesData.from_dict(restv2helper.get_node_roles(bf_session))
def bf_get_reference_book(book_name):
# type: (str) -> ReferenceBook
"""Returns the reference book with the specified for the active network."""
return ReferenceBook.from_dict(
restv2helper.get_reference_book(bf_session, book_name))
def bf_get_reference_library():
# type: () -> ReferenceLibrary
"""Returns the reference library for the active network."""
return ReferenceLibrary.from_dict(
restv2helper.get_reference_library(bf_session))
def bf_get_snapshot_inferred_node_roles():
# type: () -> NodeRolesData
"""Gets suggested definitions and hypothetical assignments of node roles for the active network and snapshot."""
return NodeRolesData.from_dict(
restv2helper.get_snapshot_inferred_node_roles(bf_session))
def bf_get_snapshot_inferred_node_role_dimension(dimension):
# type: (str) -> NodeRoleDimension
"""Gets the suggested definition and hypothetical assignments of node roles for the given inferred dimension for the active network and snapshot."""
return NodeRoleDimension.from_dict(
restv2helper.get_snapshot_inferred_node_role_dimension(bf_session,
dimension))
def bf_get_snapshot_node_roles():
# type: () -> NodeRolesData
"""Returns the definitions and assignments of node roles for the active network and snapshot."""
return NodeRolesData.from_dict(
restv2helper.get_snapshot_node_roles(bf_session))
def bf_get_snapshot_node_role_dimension(dimension):
# type: (str) -> NodeRoleDimension
"""Returns the defintion and assignments of node roles for the given dimension for the active network and snapshot."""
return NodeRoleDimension.from_dict(
restv2helper.get_snapshot_node_role_dimension(bf_session, dimension))
def bf_get_work_status(wItemId):
return get_work_status(wItemId, bf_session)
def _bf_init_or_add_analysis(analysisName, questionDirectory, newAnalysis):
from pybatfish.question.question import _load_questions_from_dir
_check_network()
questions = _load_questions_from_dir(questionDirectory)
analysis = {
question_name: question_class(question_name=question_name)
for question_name, question_class in six.iteritems(questions)
}
with tempfile.NamedTemporaryFile() as tempFile:
with open(tempFile.name, 'w') as analysisFile:
json.dump(analysis, analysisFile, indent=2, sort_keys=True,
cls=BfJsonEncoder)
json_data = workhelper.get_data_configure_analysis(
bf_session, newAnalysis, analysisName, tempFile.name, None)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_CONFIGURE_ANALYSIS, json_data)
return json_response
def bf_init_analysis(analysisName, questionDirectory):
return _bf_init_or_add_analysis(analysisName, questionDirectory, True)
def bf_init_snapshot(upload, name=None, overwrite=False, background=False):
# type: (str, Optional[str], bool, bool) -> Union[str, Dict[str, str]]
"""Initialize a new snapshot.
:param upload: snapshot to upload
:type upload: zip file or directory
:param name: name of the snapshot to initialize
:type name: string
:param overwrite: whether or not to overwrite an existing snapshot with the
same name
:type overwrite: bool
:param background: whether or not to run the task in the background
:type background: bool
:return: name of initialized snapshot, or JSON dictionary of task status if background=True
:rtype: Union[str, Dict]
"""
if name is None:
name = Options.default_snapshot_prefix + get_uuid()
validate_name(name)
if bf_session.network is None:
bf_set_network()
if name in bf_list_snapshots():
if overwrite:
bf_delete_snapshot(name)
else:
raise ValueError(
'A snapshot named "{}" already exists in network "{}"'.format(
name, bf_session.network))
file_to_send = upload
if os.path.isdir(upload):
temp_zip_file = tempfile.NamedTemporaryFile()
zip_dir(upload, temp_zip_file)
file_to_send = temp_zip_file.name
json_data = workhelper.get_data_upload_snapshot(bf_session, name,
file_to_send)
resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_UPLOAD_SNAPSHOT,
json_data)
return _parse_snapshot(name, background)
def _parse_snapshot(name, background):
# type: (str, bool) -> Union[str, Dict[str, str]]
"""Parse specified snapshot.
:param name: name of the snapshot to initialize
:type name: str
:param background: whether or not to run the task in the background
:type background: bool
:return: name of initialized snapshot, or JSON dictionary of task status if background=True
:rtype: Union[str, Dict]
"""
work_item = workhelper.get_workitem_parse(bf_session, name)
answer_dict = workhelper.execute(work_item, bf_session,
background=background)
if background:
bf_session.baseSnapshot = name
return answer_dict
status = WorkStatusCode(answer_dict["status"])
if status != WorkStatusCode.TERMINATEDNORMALLY:
init_log = restv2helper.get_work_log(bf_session, name, work_item.id)
raise BatfishException(
'Initializing snapshot {ss} failed with status {status}\n{log}'.format(
ss=name, status=status, log=init_log))
else:
bf_session.baseSnapshot = name
bf_logger.info("Default snapshot is now set to %s",
bf_session.baseSnapshot)
if bf_session.enable_diagnostics:
_warn_on_snapshot_failure()
return bf_session.baseSnapshot
def bf_kill_work(wItemId):
return kill_work(bf_session, wItemId)
def bf_list_analyses():
_check_network()
jsonData = workhelper.get_data_list_analyses(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_ANALYSES,
jsonData)
answer = jsonResponse['analysislist']
return answer
def bf_list_networks():
# type: () -> List[str]
"""
List networks the session's API key can access.
:return: a list of network names
"""
json_data = workhelper.get_data_list_networks(bf_session)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_LIST_NETWORKS, json_data)
return list(map(str, json_response['networklist']))
def bf_list_incomplete_works():
jsonData = workhelper.get_data_list_incomplete_work(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_INCOMPLETE_WORK,
jsonData)
return jsonResponse
def bf_list_questions():
_check_network()
jsonData = workhelper.get_data_list_questions(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_QUESTIONS,
jsonData)
answer = jsonResponse['questionlist']
return answer
def bf_list_snapshots(verbose=False):
# type: (bool) -> Union[List[str], List[Dict[str,Any]]]
"""
List snapshots for the current network.
:param verbose: If true, return the full output of Batfish, including
snapshot metadata.
:return: a list of snapshot names or the full json response containing
snapshots and metadata (if `verbose=True`)
"""
return restv2helper.list_snapshots(bf_session, verbose)
def bf_put_node_roles(node_roles_data):
# type: (NodeRolesData) -> None
"""Writes the definitions of node roles for the active network. Completely replaces any existing definitions."""
restv2helper.put_node_roles(bf_session, node_roles_data)
def bf_read_question_settings(question_class, json_path=None):
# type: (str, Optional[List[str]]) -> Dict[str, Any]
"""
Retrieves the network-wide JSON settings tree for the specified question class.
:param question_class: The class of question whose settings are to be read
:type question_class: string
:param json_path: If supplied, return only the subtree reached by successively
traversing each key in json_path starting from the root.
:type json_path: list
"""
return restv2helper.read_question_settings(bf_session, question_class,
json_path)
def bf_run_analysis(name, snapshot, reference_snapshot=None):
# type: (str, str, Optional[str]) -> Any
work_item = workhelper.get_workitem_run_analysis(
bf_session, name, snapshot, reference_snapshot)
work_answer = workhelper.execute(work_item, bf_session)
if work_answer["status"] != WorkStatusCode.TERMINATEDNORMALLY:
raise BatfishException("Failed to run analysis")
return bf_get_analysis_answers(name, snapshot, reference_snapshot)
def bf_set_network(name=None, prefix=Options.default_network_prefix):
# type: (str, str) -> str
"""
Configure the network used for analysis.
:param name: name of the network to set. If `None`, a name will be generated using prefix.
:type name: string
:param prefix: prefix to prepend to auto-generated network names if name is empty
:type prefix: string
:return: The name of the configured network, if configured successfully.
:rtype: string
:raises BatfishException: if configuration fails
"""
if name is None:
name = prefix + get_uuid()
validate_name(name, "network")
try:
net = restv2helper.get_network(bf_session, name)
bf_session.network = str(net['name'])
return bf_session.network
except HTTPError as e:
if e.response.status_code != 404:
raise BatfishException('Unknown error accessing network', e)
json_data = workhelper.get_data_init_network(bf_session, name)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_INIT_NETWORK, json_data)
network_name = json_response.get(CoordConsts.SVC_KEY_NETWORK_NAME)
if network_name is None:
raise BatfishException(
"Network initialization failed. Server response: {}".format(
json_response))
bf_session.network = str(network_name)
return bf_session.network
def bf_set_snapshot(name=None, index=None):
# type: (Optional[str], Optional[int]) -> str
"""
Set the current snapshot by name or index.
:param name: name of the snapshot to set as the current snapshot
:type name: string
:param index: set the current snapshot to the ``index``-th most recent snapshot
:type index: int
:return: the name of the successfully set snapshot
:rtype: str
"""
if name is None and index is None:
raise ValueError('One of name and index must be set')
if name is not None and index is not None:
raise ValueError('Only one of name and index can be set')
snapshots = bf_list_snapshots()
# Index specified, simply give the ith snapshot
if index is not None:
if not (-len(snapshots) <= index < len(snapshots)):
raise IndexError(
"Server has only {} snapshots: {}".format(
len(snapshots), snapshots))
bf_session.baseSnapshot = str(snapshots[index])
# Name specified, make sure it exists.
else:
assert name is not None # type-hint to Python
if name not in snapshots:
raise ValueError(
'No snapshot named "{}" was found in network "{}": {}'.format(
name, bf_session.network, snapshots))
bf_session.baseSnapshot = name
bf_logger.info("Default snapshot is now set to %s", bf_session.baseSnapshot)
return bf_session.baseSnapshot
def bf_upload_diagnostics(dry_run=True, netconan_config=None):
# type: (bool, str) -> str
"""
Fetch, anonymize, and optionally upload snapshot diagnostics information.
This runs a series of diagnostic questions on the current snapshot
(including collecting parsing and conversion information).
The information collected is anonymized with
`Netconan <https://github.com/intentionet/netconan>`_ which either
anonymizes passwords and IP addresses (default) or uses the settings in
the provided `netconan_config`.
The anonymous information is then either saved locally (if `dry_run` is
True) or uploaded to Batfish developers (if `dry_run` is False). The
uploaded information will be accessible only to Batfish developers and will
be used to help diagnose any issues you encounter.
:param dry_run: whether or not to skip upload; if True, anonymized files are only stored locally, otherwise they are uploaded to Batfish developers
:type dry_run: bool
:param netconan_config: path to Netconan configuration file
:type netconan_config: string
:return: location of anonymized files (local directory if doing dry run, otherwise upload ID)
:rtype: string
"""
return _upload_diagnostics(dry_run=dry_run, netconan_config=netconan_config)
def bf_write_question_settings(settings, question_class, json_path=None):
# type: (Dict[str, Any], str, Optional[List[str]]) -> None
"""
Write the network-wide JSON settings tree for the specified question class.
:param settings: The JSON representation of the settings to be written
:type settings: dict
:param question_class: The class of question to configure
:type question_class: string
:param json_path: If supplied, write settings to the subtree reached by successively
traversing each key in json_path starting from the root. Any absent keys along
the path will be created.
:type json_path: list
"""
restv2helper.write_question_settings(bf_session, settings, question_class,
json_path)
def _check_network():
"""Check if current network is set."""
if bf_session.network is None:
raise BatfishException("Network is not set")
|
# encoding: utf-8
__author__ = 'jesuejunior'
|
"""Task definitions for machine translation tasks."""
import codecs
import collections
import math
import os
from typing import Iterable, List, Sequence, Type
from allennlp.data import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.training.metrics import Average
from ..utils.data_loaders import process_sentence
from .registry import register_task
from .tasks import (
UNK_TOK_ALLENNLP,
UNK_TOK_ATOMIC,
SequenceGenerationTask,
atomic_tokenize,
sentence_to_text_field,
)
# TODO: remove dummy / debug tasks
@register_task("wmt_debug", rel_path="wmt_debug/", max_targ_v_size=5000)
@register_task("wmt17_en_ru", rel_path="wmt17_en_ru/", max_targ_v_size=20000)
@register_task("wmt14_en_de", rel_path="wmt14_en_de/", max_targ_v_size=20000)
class MTTask(SequenceGenerationTask):
"""Machine Translation Task"""
def __init__(self, path, max_seq_len, max_targ_v_size, name, **kw):
""" """
super().__init__(name, **kw)
self.scorer1 = Average()
self.scorer2 = Average()
self.scorer3 = Average()
self.scorers = [self.scorer1, self.scorer2, self.scorer3]
self.val_metric = "%s_perplexity" % self.name
self.val_metric_decreases = True
self.max_seq_len = max_seq_len
self._label_namespace = self.name + "_tokens"
self.max_targ_v_size = max_targ_v_size
self.target_indexer = {"words": SingleIdTokenIndexer(namespace=self._label_namespace)}
self.files_by_split = {
split: os.path.join(path, "%s.txt" % split) for split in ["train", "val", "test"]
}
def load_data(self):
# Data is exposed as iterable: no preloading
pass
def get_split_text(self, split: str):
""" Get split text as iterable of records.
Split should be one of 'train', 'val', or 'test'.
"""
return self.get_data_iter(self.files_by_split[split])
def get_all_labels(self) -> List[str]:
""" Build vocabulary and return it as a list """
word2freq = collections.Counter()
for split in ["train", "val"]:
for _, sent in self.get_data_iter(self.files_by_split[split]):
for word in sent:
word2freq[word] += 1
return [w for w, _ in word2freq.most_common(self.max_targ_v_size)]
def get_data_iter(self, path):
""" Load data """
with codecs.open(path, "r", "utf-8", errors="ignore") as txt_fh:
for row in txt_fh:
row = row.strip().split("\t")
if len(row) < 2 or not row[0] or not row[1]:
continue
src_sent = process_sentence(self._tokenizer_name, row[0], self.max_seq_len)
# Currently: force Moses tokenization on targets
tgt_sent = process_sentence("MosesTokenizer", row[1], self.max_seq_len)
yield (src_sent, tgt_sent)
def get_sentences(self) -> Iterable[Sequence[str]]:
""" Yield sentences, used to compute vocabulary. """
for split in self.files_by_split:
# Don't use test set for vocab building.
if split.startswith("test"):
continue
path = self.files_by_split[split]
yield from self.get_data_iter(path)
def count_examples(self):
""" Compute here b/c we're streaming the sentences. """
example_counts = {}
for split, split_path in self.files_by_split.items():
example_counts[split] = sum(
1 for _ in codecs.open(split_path, "r", "utf-8", errors="ignore")
)
self.example_counts = example_counts
def process_split(self, split, indexers) -> Iterable[Type[Instance]]:
""" Process split text into a list of AllenNLP Instances. """
def _make_instance(input_, target):
d = {
"inputs": sentence_to_text_field(input_, indexers),
"targs": sentence_to_text_field(target, self.target_indexer),
}
return Instance(d)
for sent1, sent2 in split:
yield _make_instance(sent1, sent2)
def get_metrics(self, reset=False):
"""Get metrics specific to the task"""
avg_nll = self.scorer1.get_metric(reset)
unk_ratio_macroavg = self.scorer3.get_metric(reset)
return {
"perplexity": math.exp(avg_nll),
"bleu_score": 0,
"unk_ratio_macroavg": unk_ratio_macroavg,
}
@register_task("reddit_s2s", rel_path="Reddit/", max_targ_v_size=0)
@register_task("reddit_s2s_3.4G", rel_path="Reddit_3.4G/", max_targ_v_size=0)
class RedditSeq2SeqTask(MTTask):
""" Task for seq2seq using reddit data
Note: max_targ_v_size doesn't do anything here b/c the
target is in English"""
def __init__(self, path, max_seq_len, max_targ_v_size, name, **kw):
super().__init__(
path=path, max_seq_len=max_seq_len, max_targ_v_size=max_targ_v_size, name=name, **kw
)
self._label_namespace = None
self.target_indexer = {"words": SingleIdTokenIndexer("tokens")}
self.files_by_split = {
"train": os.path.join(path, "train.csv"),
"val": os.path.join(path, "val.csv"),
"test": os.path.join(path, "test.csv"),
}
def get_data_iter(self, path):
""" Load data """
with codecs.open(path, "r", "utf-8", errors="ignore") as txt_fh:
for row in txt_fh:
row = row.strip().split("\t")
if len(row) < 4 or not row[2] or not row[3]:
continue
src_sent = process_sentence(self._tokenizer_name, row[2], self.max_seq_len)
tgt_sent = process_sentence(self._tokenizer_name, row[3], self.max_seq_len)
yield (src_sent, tgt_sent)
def process_split(self, split, indexers) -> Iterable[Type[Instance]]:
""" Process split text into a list of AllenNLP Instances. """
def _make_instance(input_, target):
d = {
"inputs": sentence_to_text_field(input_, indexers),
"targs": sentence_to_text_field(target, self.target_indexer),
}
return Instance(d)
for sent1, sent2 in split:
yield _make_instance(sent1, sent2)
@register_task("wiki2_s2s", rel_path="WikiText2/", max_targ_v_size=0)
@register_task("wiki103_s2s", rel_path="WikiText103/", max_targ_v_size=0)
class Wiki103Seq2SeqTask(MTTask):
""" Skipthought objective on Wiki103 """
def __init__(self, path, max_seq_len, max_targ_v_size, name, **kw):
""" Note: max_targ_v_size does nothing here """
super().__init__(path, max_seq_len, max_targ_v_size, name, **kw)
# for the skip-thoughts setting, the source sentences are all sentences that
# are followed by another sentence (i.e. all but the last one).
# Similarly for self.target_sentences
self._nonatomic_toks = [UNK_TOK_ALLENNLP, "<unk>"]
self._label_namespace = None
self.target_indexer = {"words": SingleIdTokenIndexer("tokens")}
self.files_by_split = {
"train": os.path.join(path, "train.sentences.txt"),
"val": os.path.join(path, "valid.sentences.txt"),
"test": os.path.join(path, "test.sentences.txt"),
}
def get_data_iter(self, path):
""" Load data """
nonatomic_toks = self._nonatomic_toks
with codecs.open(path, "r", "utf-8", errors="ignore") as txt_fh:
for row in txt_fh:
toks = row.strip()
if not toks:
continue
sent = atomic_tokenize(
toks,
UNK_TOK_ATOMIC,
nonatomic_toks,
self.max_seq_len,
tokenizer_name=self._tokenizer_name,
)
yield sent, []
@classmethod
def get_num_examples(cls, split_text):
""" Return number of examples in the result of get_split_text.
Subclass can override this if data is not stored in column format.
"""
# number of sentence pairs = number of sentences - 1
return len(split_text) - 1
def process_split(self, split, indexers) -> Iterable[Type[Instance]]:
""" Process a language modeling split.
Split is a single list of sentences here.
"""
def _make_instance(prev_sent_, sent_):
d = {
"inputs": sentence_to_text_field(prev_sent_, indexers),
"targs": sentence_to_text_field(sent_, self.target_indexer),
}
return Instance(d)
prev_sent = None
for sent, _ in split:
if prev_sent is None:
prev_sent = sent
continue
yield _make_instance(prev_sent, sent)
prev_sent = sent
|
import os
from uploader.api import (
get_repo_list, create_repo,
get_repo, upload_file,
get_share_link,
get_token
)
from uploader.logger import get_logger
logger = get_logger(__name__)
def upload_local_file(
server, token, repo_id, upload_url, repo_path,
file_path, replace=True
):
file_name = os.path.basename(file_path)
with open(file_path, 'rb') as file_obj:
return (
upload_file(
server, token, repo_id, upload_url,
repo_path, file_name, file_obj, replace
),
file_name
)
def get_target_repo(server, token, repo_name):
logger.info('Looking for library "{}"'.format(repo_name))
for repo in get_repo_list(server, token):
if repo['name'] == repo_name:
return repo
logger.info('Creating library "{}"'.format(repo_name))
return get_repo(
server, token, create_repo(server, token, repo_name)['repo_id']
)
def upload(
server, filepath,
token, login, password,
reponame, repoid, uploadurl,
repopath, fpassword, fexpiration
):
try:
logger.info('Seafile-uploader started')
if not token:
token = get_token(server, login, password)
if not repoid:
repo = get_target_repo(server, token, reponame)
repoid = repo['id']
logger.info('Uploading "{}"'.format(filepath))
_, file_name = upload_local_file(
server, token, repoid, uploadurl, repopath, filepath
)
library_file_path = repopath + file_name
logger.info('Getting share link of "{}"'.format(library_file_path))
link = get_share_link(
server, token, repoid, library_file_path,
password=fpassword, expire_days=fexpiration
)
logger.info('Success: {}'.format(link))
except Exception as err:
logger.error(err)
return err
return link
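# Usage sketch (all values are placeholders; pass token=None to have upload()
# obtain one from login/password, and repoid=None to look the library up by
# reponame, creating it if needed; uploadurl is whatever upload link the
# uploader.api layer expects):
#
#   upload(
#       server='https://seafile.example.com', filepath='report.pdf',
#       token=None, login='user@example.com', password='secret',
#       reponame='Reports', repoid=None, uploadurl='<upload link>',
#       repopath='/', fpassword=None, fexpiration=None,
#   )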
|
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy as _
from ..reversion_utils import InitialRevisionManagerMixin
class WaterParcelManager(InitialRevisionManagerMixin, models.GeoManager):
pass
class WaterParcel(models.Model):
"""
A parcel defined by the Philadelphia Water Department.
May not be strictly the same as parcels as defined by Records.
"""
objects = WaterParcelManager()
geometry = models.MultiPolygonField(_('geometry'), blank=True, null=True)
# IDs
parcel_id = models.CharField(_('parcel id'),
max_length=20,
blank=True,
null=True,
help_text=_('The parcel ID assigned by the Water Dept'),
)
parcelid = models.IntegerField(_('parcel id (int)'),
blank=True,
null=True,
help_text=_('The parcel ID assigned by the Water Dept'),
)
brt_account = models.CharField(_('BRT account'),
max_length=20,
blank=True,
null=True,
help_text=_('The OPA/BRT account according to the Water Dept'),
)
ten_code = models.CharField(_('ten code'),
max_length=20,
blank=True,
null=True,
)
# parcel information
owner1 = models.CharField(_('owner1'),
max_length=256,
blank=True,
null=True,
)
owner2 = models.CharField(_('owner2'),
max_length=256,
blank=True,
null=True,
)
address = models.CharField(_('address'),
max_length=256,
blank=True,
null=True,
)
gross_area = models.DecimalField(_('gross area'),
max_digits=20,
decimal_places=2,
blank=True,
null=True,
help_text=_('The area of the parcel in square feet')
)
impervious_area = models.DecimalField(_('impervious area'),
max_digits=20,
decimal_places=2,
blank=True,
null=True,
help_text=_('The impervious area of the parcel in square feet')
)
# These fields will have hints to vacancy--eg, 'building_description'
# startswith 'VAC LAND'
building_type = models.CharField(_('building type'),
max_length=50,
blank=True,
null=True,
)
building_description = models.CharField(_('building description'),
max_length=100,
blank=True,
null=True,
)
building_code = models.CharField(_('building code'),
max_length=20,
blank=True,
null=True,
)
def _percent_permeable(self):
return (1 - (self.impervious_area / self.gross_area )) * 100
percent_permeable = property(_percent_permeable)
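# Worked example for percent_permeable (illustrative numbers): a parcel with
# gross_area=1000 and impervious_area=250 square feet yields
# (1 - 250/1000) * 100 = 75.0 percent permeable. Note the property assumes
# both areas are set and gross_area is non-zero.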
def __unicode__(self):
return '%s at %s, owned by %s, %s' % (self.parcel_id, self.address,
self.owner1, self.owner2 or '')
class WaterAccount(models.Model):
"""An account with the Philadelphia Water Department."""
water_parcel = models.ForeignKey('WaterParcel',
verbose_name=_('water parcel')
)
account_id = models.CharField(_('account ID'),
max_length=30,
blank=True,
null=True,
help_text=_('The ID of this account with the Water Department'),
)
account_number = models.CharField(_('account number'),
max_length=30,
blank=True,
null=True,
help_text=_('A slightly expanded version of the account ID'),
)
customer_id = models.CharField(_('customer ID'),
max_length=20,
blank=True,
null=True,
help_text=_('The ID for this customer with the Water Department'),
)
customer_name = models.CharField(_('customer name'),
max_length=100,
blank=True,
null=True,
)
inst_id = models.CharField(_('inst ID'),
max_length=20,
blank=True,
null=True,
)
# TODO 'Discontinued' could indicate vacancy
account_status = models.CharField(_('account status'),
max_length=30,
blank=True,
null=True,
help_text=_('Discontinued / Current'),
)
account_status_abbreviation = models.CharField(_('account status abbreviation'),
max_length=10,
blank=True,
null=True,
)
meter_size = models.CharField(_('meter size'),
max_length=30,
blank=True,
null=True,
)
meter_size_abbreviation = models.CharField(_('meter size abbreviation'),
max_length=10,
blank=True,
null=True,
)
service_type = models.CharField(_('service type'),
max_length=10,
blank=True,
null=True,
)
# TODO '3 - Stormwater Only' might indicate vacancy
service_type_label = models.CharField(_('service type label'),
max_length=50,
blank=True,
null=True,
)
stormwater_status = models.CharField(_('stormwater status'),
max_length=30,
blank=True,
null=True,
help_text=_('Billed / Not Billed'),
)
def __unicode__(self):
return '%s (%s), %s' % (self.account_id, self.account_status,
self.service_type_label)
# Mapping only includes data available in the shapefile provided by the Water
# Department. Other model fields are filled, on demand, using the API.
waterparcel_mapping = {
'parcelid' : 'PARCELID',
'ten_code' : 'TENCODE',
'address' : 'ADDRESS',
'owner1' : 'OWNER1',
'owner2' : 'OWNER2',
'building_code' : 'BLDG_CODE',
'building_description' : 'BLDG_DESC',
'brt_account' : 'BRT_ID',
'gross_area' : 'GROSS_AREA',
'geometry' : 'POLYGON',
}
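# Sketch of how such a mapping dict is typically consumed with GeoDjango's
# LayerMapping (illustrative; the shapefile path is hypothetical and the
# actual import may well happen elsewhere in this project):
#
#   from django.contrib.gis.utils import LayerMapping
#   lm = LayerMapping(WaterParcel, 'data/water_parcels.shp', waterparcel_mapping,
#                     transform=False)
#   lm.save(strict=True, verbose=True)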
|
'''OpenGL extension INGR.color_clamp
This module customises the behaviour of the
OpenGL.raw.GL.INGR.color_clamp to provide a more
Python-friendly API
Overview (from the spec)
Various RGBA color space conversions require clamping to values
in a more constrained range than [0, 1]. This extension allows
the definition of independent color clamp values for each of the
four color components as part of the Final Conversion in the pixel
transfer path for draws, reads, and copies.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/INGR/color_clamp.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INGR.color_clamp import *
from OpenGL.raw.GL.INGR.color_clamp import _EXTENSION_NAME
def glInitColorClampINGR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
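# Usage sketch (a current OpenGL context must already exist, e.g. one created
# by GLUT or a windowing toolkit, before the extension query is meaningful):
#
#   if glInitColorClampINGR():
#       pass  # the INGR color-clamp constants from this module may be used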
|
#-*- coding:utf-8 -*-
from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication
from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal
from PyQt5.QtGui import QPainter, QColor
from board import Board
class Tetris(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.tboard = Board(self)
self.setCentralWidget(self.tboard)
self.statusbar = self.statusBar()
self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)
self.tboard.start()
self.resize(180, 380)
self.center()
self.setWindowTitle('Tetris')
self.show()
def center(self):
"""
Get the screen resolution and move this application to the center of the desktop.
"""
screen = QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width() - size.width()) // 2,
(screen.height() - size.height()) // 2)
if __name__ == '__main__':
import sys
app = QApplication([])
tetris = Tetris()
sys.exit(app.exec_())
|
# Generated by Django 2.1 on 2018-08-19 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0057_add_status_to_relief_camp"),
]
operations = [
migrations.CreateModel(
name="DataCollection",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID",),),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"document_name",
models.CharField(blank=True, max_length=255, null=True, verbose_name="Document name",),
),
("document", models.FileField(blank=True, upload_to="camp_data")),
("tag", models.CharField(blank=True, max_length=255, null=True)),
],
options={"verbose_name": "Data: Collection", "verbose_name_plural": "Data: Collections",},
),
migrations.DeleteModel(name="ReliefCampData",),
]
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, Dict, List, Mapping
from unittest.mock import patch
import pytest
from airbyte_cdk import AirbyteLogger
from source_s3.source_files_abstract.stream import FileStream
from .abstract_test_parser import create_by_local_file, memory_limit
LOGGER = AirbyteLogger()
class TestFileStream:
@pytest.mark.parametrize( # set return_schema to None for an expected fail
"schema_string, return_schema",
[
(
'{"id": "integer", "name": "string", "valid": "boolean", "code": "integer", "degrees": "number", "birthday": "string", "last_seen": "string"}',
{
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
('{"single_column": "boolean"}', {"single_column": "boolean"}),
(r"{}", {}),
('{this isn\'t right: "integer"}', None), # invalid json
('[ {"a":"b"} ]', None), # array instead of object
('{"a": "boolean", "b": {"string": "integer"}}', None), # object as a value
('{"a": ["boolean", "string"], "b": {"string": "integer"}}', None), # array and object as values
('{"a": "integer", "b": "NOT A REAL DATATYPE"}', None), # incorrect datatype
('{"a": "NOT A REAL DATATYPE", "b": "ANOTHER FAKE DATATYPE"}', None), # multiple incorrect datatypes
],
)
@memory_limit(512)
def test_parse_user_input_schema(self, schema_string: str, return_schema: str) -> None:
if return_schema is not None:
assert str(FileStream._parse_user_input_schema(schema_string)) == str(return_schema)
else:
with pytest.raises(Exception) as e_info:
FileStream._parse_user_input_schema(schema_string)
LOGGER.debug(str(e_info))
@pytest.mark.parametrize( # set expected_return_record to None for an expected fail
"target_columns, record, expected_return_record",
[
( # simple case
["id", "first_name", "last_name"],
{"id": "1", "first_name": "Frodo", "last_name": "Baggins"},
{"id": "1", "first_name": "Frodo", "last_name": "Baggins", "_ab_additional_properties": {}},
),
( # additional columns
["id", "first_name", "last_name"],
{"id": "1", "first_name": "Frodo", "last_name": "Baggins", "location": "The Shire", "items": ["The One Ring", "Sting"]},
{
"id": "1",
"first_name": "Frodo",
"last_name": "Baggins",
"_ab_additional_properties": {"location": "The Shire", "items": ["The One Ring", "Sting"]},
},
),
( # missing columns
["id", "first_name", "last_name", "location", "items"],
{"id": "1", "first_name": "Frodo", "last_name": "Baggins"},
{
"id": "1",
"first_name": "Frodo",
"last_name": "Baggins",
"location": None,
"items": None,
"_ab_additional_properties": {},
},
),
( # additional and missing columns
["id", "first_name", "last_name", "friends", "enemies"],
{"id": "1", "first_name": "Frodo", "last_name": "Baggins", "location": "The Shire", "items": ["The One Ring", "Sting"]},
{
"id": "1",
"first_name": "Frodo",
"last_name": "Baggins",
"friends": None,
"enemies": None,
"_ab_additional_properties": {"location": "The Shire", "items": ["The One Ring", "Sting"]},
},
),
],
ids=["simple_case", "additional_columns", "missing_columns", "additional_and_missing_columns"],
)
@patch(
"source_s3.source_files_abstract.stream.FileStream.__abstractmethods__", set()
) # patching abstractmethods to empty set so we can instantiate ABC to test
def test_match_target_schema(
self, target_columns: List[str], record: Dict[str, Any], expected_return_record: Mapping[str, Any]
) -> None:
fs = FileStream(dataset="dummy", provider={}, format={}, path_pattern="")
if expected_return_record is not None:
assert fs._match_target_schema(record, target_columns) == expected_return_record
else:
with pytest.raises(Exception) as e_info:
fs._match_target_schema(record, target_columns)
LOGGER.debug(str(e_info))
@pytest.mark.parametrize( # set expected_return_record to None for an expected fail
"extra_map, record, expected_return_record",
[
( # one extra field
{"friend": "Frodo"},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee"},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee", "friend": "Frodo"},
),
( # multiple extra fields
{"friend": "Frodo", "enemy": "Gollum", "loves": "PO-TAY-TOES"},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee"},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee", "friend": "Frodo", "enemy": "Gollum", "loves": "PO-TAY-TOES"},
),
( # empty extra_map
{},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee"},
{"id": "1", "first_name": "Samwise", "last_name": "Gamgee"},
),
],
ids=["one_extra_field", "multiple_extra_fields", "empty_extra_map"],
)
@patch(
"source_s3.source_files_abstract.stream.FileStream.__abstractmethods__", set()
) # patching abstractmethods to empty set so we can instantiate ABC to test
@memory_limit(512)
def test_add_extra_fields_from_map(
self, extra_map: Mapping[str, Any], record: Dict[str, Any], expected_return_record: Mapping[str, Any]
) -> None:
fs = FileStream(dataset="dummy", provider={}, format={}, path_pattern="")
if expected_return_record is not None:
assert fs._add_extra_fields_from_map(record, extra_map) == expected_return_record
else:
with pytest.raises(Exception) as e_info:
fs._add_extra_fields_from_map(record, extra_map)
LOGGER.debug(str(e_info))
@pytest.mark.parametrize(
"patterns, filepaths, expected_filepaths",
[
( # 'everything' case
"**",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
),
( # specific filetype only
"**/*.csv",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
["file.csv", "folder/file.csv", "folder/nested/file.csv"],
),
( # specific filetypes only
"**/*.csv|**/*.parquet",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
],
),
( # 'everything' only 1 level deep
"*/*",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
["folder/file.csv", "folder/file.parquet"],
),
( # 'everything' at least 1 level deep
"*/**",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
["folder/file.csv", "folder/file.parquet", "folder/nested/file.csv", "folder/nested/file.parquet", "a/b/c/d/e/f/file"],
),
( # 'everything' at least 3 levels deep
"*/*/*/**",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
["a/b/c/d/e/f/file"],
),
( # specific filetype at least 1 level deep
"*/**/*.csv",
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
["folder/file.csv", "folder/nested/file.csv"],
),
( # 'everything' with specific filename (any filetype)
"**/file.*|**/file",
[
"NOT_THIS_file.csv",
"folder/NOT_THIS_file.csv",
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
[
"file.csv",
"file.parquet",
"folder/file.csv",
"folder/file.parquet",
"folder/nested/file.csv",
"folder/nested/file.parquet",
"a/b/c/d/e/f/file",
],
),
( # specific dir / any dir / specific dir / any file
"folder/*/files/*",
[
"file.csv",
"folder/file.csv",
"wrongfolder/xyz/files/1",
"a/b/c/d/e/f/file",
"folder/abc/files/1",
"folder/abc/logs/1",
"folder/xyz/files/1",
],
["folder/abc/files/1", "folder/xyz/files/1"],
),
( # specific file prefix and filetype, anywhere
"**/prefix*.csv",
[
"file.csv",
"prefix-file.parquet",
"prefix-file.csv",
"folder/file.parquet",
"folder/nested/prefixmylovelyfile.csv",
"folder/nested/prefix-file.parquet",
],
["prefix-file.csv", "folder/nested/prefixmylovelyfile.csv"],
),
],
ids=[
"everything case",
"specific filetype only",
"specific filetypes only",
"everything only 1 level deep",
"everything at least 1 level deep",
"everything at least 3 levels deep",
"specific filetype at least 1 level deep",
"everything with specific filename (any filetype)",
"specific dir / any dir / specific dir / any file",
"specific file prefix and filetype, anywhere",
],
)
@patch(
"source_s3.source_files_abstract.stream.FileStream.__abstractmethods__", set()
) # patching abstractmethods to empty set so we can instantiate ABC to test
@memory_limit(512)
def test_pattern_matched_filepath_iterator(self, patterns: str, filepaths: List[str], expected_filepaths: List[str]) -> None:
fs = FileStream(dataset="dummy", provider={}, format={}, path_pattern=patterns)
file_infos = [create_by_local_file(filepath) for filepath in filepaths]
assert set([p.key for p in fs.pattern_matched_filepath_iterator(file_infos)]) == set(expected_filepaths)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 5 12:39:28 2022
@author: brunocr
"""
'''Define a function that removes from a given array of
integers all the values contained in a second array'''
def popper (father_array: list, popper: list)->list:
res=[]
    for i, c in enumerate(father_array):  # i and c because enumerate yields (index, value) tuples
if c not in popper:
res.append(father_array[i])
return res
def popper2 (father_array: list, popper: list)->list:
res=[]
for value in father_array:
if value not in popper:
res.append(value)
return res
def popper3 (father_array: list, popper: list)->list:
    res = father_array[:]  # copy the list so the original object is not mutated while iterating
for value in father_array:
for plop in popper:
if value == plop:
res.remove(value)
return res
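# Minimal usage sketch (values are illustrative assumptions, not part of the original
# exercise): all three variants should return the same filtered list.
if __name__ == "__main__":
    numbers = [1, 2, 3, 4, 5, 6]
    to_remove = [2, 4, 6]
    print(popper(numbers, to_remove))   # [1, 3, 5]
    print(popper2(numbers, to_remove))  # [1, 3, 5]
    print(popper3(numbers, to_remove))  # [1, 3, 5]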
|
import logging
import os.path
import json
logger = logging.getLogger(__name__)
uri = "/isam/ssl_certificates"
def get_all_certificates (isamAppliance, check_mode=False, force=False):
"""
Get information about all certificates on the appliance
"""
import time
epoch_time = int(time.time())
certs=[]
dbs_obj = isamAppliance.invoke_get("Retrieve all certificate databases", uri)
dbs=dbs_obj['data']
for db in dbs:
pcert_obj=isamAppliance.invoke_get("Retrieve personal certificates", "{0}/{1}/personal_cert".format(uri,db['id']))
logger.info("Got object {0}".format(pcert_obj))
pcerts=pcert_obj['data']
for pcert in pcerts:
cert_epoch = int(pcert['notafter_epoch'])
certs.append({
"db_id":db['id'],
"cert_id":pcert['id'],
"issuer":pcert['issuer'],
"subject":pcert['subject'],
"type":"personal",
"exp_epoch":pcert['notafter_epoch'],
"exp_date":pcert['notafter'],
"expired":cert_epoch < epoch_time
})
scert_obj=isamAppliance.invoke_get("Retrieve signer certificates", "{0}/{1}/signer_cert".format(uri,db['id']))
scerts=scert_obj['data']
for scert in scerts:
cert_epoch = int(scert['notafter_epoch'])
certs.append({
"db_id":db['id'],
"cert_id":scert['id'],
"issuer":scert['issuer'],
"subject":scert['subject'],
"type":"signer",
"exp_epoch":scert['notafter_epoch'],
"exp_date":scert['notafter'],
"expired":cert_epoch < epoch_time})
return_obj = isamAppliance.create_return_object()
return_obj['data'] = certs
return return_obj
|
import unittest as t
from py3tftp.exceptions import BadRequest, UnacknowledgedOption
from py3tftp.tftp_parsing import (blksize_parser, parse_req, timeout_parser,
validate_req)
class TestTimeoutParser(t.TestCase):
def test_lower_bound(self):
low_val = b'-11'
with self.assertRaises(UnacknowledgedOption):
timeout_parser(low_val)
def test_upper_bound(self):
high_val = b'999'
with self.assertRaises(UnacknowledgedOption):
timeout_parser(high_val)
def test_float_within_acceptable_range(self):
val = b'20.5'
self.assertEqual(timeout_parser(val), 20.5)
def test_garbage_data(self):
val = b'\x41'
with self.assertRaises(ValueError):
timeout_parser(val)
class TestBlksizeParser(t.TestCase):
def test_lower_bound(self):
low_val = b'4'
with self.assertRaises(UnacknowledgedOption):
blksize_parser(low_val)
def test_upper_bound_capped(self):
high_val = b'70000'
self.assertEqual(blksize_parser(high_val, upper_bound=4096), 4096)
def test_int_within_acceptable_range(self):
val = b'2048'
self.assertEqual(blksize_parser(val), 2048)
def test_garbage_data(self):
val = b'\x41'
with self.assertRaises(ValueError):
blksize_parser(val)
class TestParseReq(t.TestCase):
def test_not_enough_values(self):
req = b'fname\x00'
with self.assertRaises(BadRequest):
parse_req(req)
def test_odd_number_of_opts(self):
req = b'fname\x00mode\x00opt1\x00val1\x00opt2'
fname, mode, opts = parse_req(req)
self.assertDictEqual(opts, {b'opt1': b'val1'})
def test_correct_output(self):
req = b'fname\x00mode\x00opt1\x00val1\x00opt2\x00val2'
fname, mode, opts = parse_req(req)
self.assertEqual(fname, b'fname')
self.assertEqual(mode, b'mode')
self.assertDictEqual(opts, {b'opt1': b'val1', b'opt2': b'val2'})
class TestValidateReq(t.TestCase):
def setUp(self):
self.fname = b'fname'
self.mode = b'mode'
self.opts = {b'opt1': b'val1'}
self.opt1_parser = lambda opt: opt.decode('ascii')
def test_fname_is_ascii(self):
fname, *_ = validate_req(self.fname, self.mode, self.opts)
self.assertEqual(fname, 'fname')
def test_drops_unsupported_opts(self):
_, _, opts = validate_req(
self.fname,
self.mode,
self.opts,
supported_opts={b'opt1': self.opt1_parser})
self.assertDictEqual(opts, {b'opt1': 'val1'})
def test_drops_garbage_opts(self):
_, _, opts = validate_req(
self.fname,
self.mode,
{**self.opts, **{b'opt2': 'val2'}},
supported_opts={b'opt1': self.opt1_parser})
self.assertDictEqual(opts, {b'opt1': 'val1'})
|
print("""Exercício Python 078: Faça um programa que leia 5 valores numéricos e guarde-os em uma lista.
No final, mostre qual foi o maior e o menor valor digitado e as suas respectivas posições na lista. """)
lista=[]
cont = 0
for i in range (0,5):
lista.append(str(input("digite um numero ")))
cont += 1
if cont == 1 :
maior = lista[i]
menor = lista[i]
else:
if lista[i] > maior :
maior = lista[i]
if lista[i] < menor:
maior = lista[i]
for i in range (0,5):
print(lista[i], end= " ")
print(f'\nO maior numero é {maior}')
print(f'O menor numero é {menor}')
|
import praw
import datetime
import time
from requests.exceptions import HTTPError
from praw.errors import ExceptionList, APIException, InvalidCaptcha, InvalidUser, RateLimitExceeded
import sqlite3 as sql
from bs4 import BeautifulSoup
from emailGlobals import sendEmail
from inboxHandler import readInbox
from getMatchInfo import returnSoup
import HTMLParser
def updateLiveScores(r):
ArrayOfCurrentlyRunningFixtures = getArrayOfCurrentlyRunningFixtures()
if not ArrayOfCurrentlyRunningFixtures:
return
for runningFixture in ArrayOfCurrentlyRunningFixtures:
matchThreadLink = runningFixture[0]
liveThreadLink = runningFixture[1]
try:
matchScoreUpdater(r,liveThreadLink,matchThreadLink)
        except Exception:  # don't let one failed fixture abort the whole update loop
sendEmail("Couldn't update live score","Quitting this loop, will try again next loop.")
return
def getArrayOfCurrentlyRunningFixtures():
con = None
con = sql.connect('rCricket.db',detect_types=sql.PARSE_COLNAMES)
cur = con.cursor()
currentGMT=datetime.datetime.utcnow()
    TenHoursAgo = currentGMT - datetime.timedelta(hours=10)  # window start: 10 hours before now
    cur.execute("select matchThreadLink,liveThreadLink from MatchThreads where creationTime between ? and ?", (TenHoursAgo, currentGMT))
data=cur.fetchall()
return data
def matchScoreUpdater(r,liveThreadLink,matchThreadLink):
iFrameLink=getiFrameLink(liveThreadLink)
liveScoreText=getLiveScoreText(iFrameLink)
updateMatchThread(r,matchThreadLink,liveScoreText)
def getiFrameLink(liveThreadLink):
return liveThreadLink+'?template=iframe_desktop'
def updateMatchThread(r,matchThreadLink,liveScoreText):
submission = r.get_submission(matchThreadLink)
selfText = submission.selftext
html_parser = HTMLParser.HTMLParser()
start = selfText.find("***")
end = selfText.find("***",(start+3)) + 3
selfText = selfText[:start] + liveScoreText + selfText[end:]
selfText = html_parser.unescape(selfText)
submission.edit(selfText)
def getLiveScoreText(iFrameLink):
returnText=["","",""]
returnText[1]="***\n\n|Team|Score|\n|:---|:---|"
soup = returnSoup(iFrameLink)
for Table in soup.find_all(class_="desktopPanelContent"):
returnText[1]=returnText[1]+HTMLTableToPythonTable(Table)[1]+"\n\n"
returnText[2]=returnText[2]+HTMLTableToPythonTable(Table)[2]
index=returnText[1].find("|Batsmen|R|B|4s|6s|")+len("|Batsmen|R|B|4s|6s|")
finalReturnText=returnText[1][:index]+"\n|:---|:---|:---|:---|:---|"+returnText[1][index:]
finalReturnText=finalReturnText + returnText[2]
finalReturnText=finalReturnText+"***"
return finalReturnText
def HTMLTableToPythonTable(Table):
returnText=["","",""]
for TableRow in Table.find_all("tr"):
if len(TableRow.find_all("td"))>1:
returnText[1]=returnText[1]+"\n"
returnText[1]=returnText[1]+"|"
for TableData in TableRow.find_all("td"):
if TableData.string:
returnText[1]=returnText[1]+TableData.string+"|"
else:
returnText[1]=returnText[1]+" |"
for TableRow in Table.find_all("tr"):
if len(TableRow.find_all("td"))==1:
TableData=TableRow.find("td")
if TableData.string:
returnText[2]=returnText[2]+TableData.string+"\n\n"
return returnText
|
from selenium import webdriver
import time
PATH = "driver/geckodriver.exe"
try:
browser = webdriver.Firefox(executable_path=PATH)
print("Success.")
except Exception as e:
print(f"Browser cannot be started. ERROR {e}")
URL = "https://curso-python-selenium.netlify.app/exercicio_01.html"
browser.get(URL)
time.sleep(2)
dictP = {"h1": {"text1": "text", "text2": "text", "text3": "text"}}
for i in range(3):
p = browser.find_elements_by_tag_name("p")
dictP['h1'][f'text{i+1}'] = p[i].text
print(dictP)
browser.quit()
|
from pydantic import BaseModel, Field
class ViveTrackerMessage(BaseModel):
valid: bool = Field(default=False)
x: float = Field(default=0.0)
y: float = Field(default=0.0)
z: float = Field(default=0.0)
roll: float = Field(default=0.0)
pitch: float = Field(default=0.0)
yaw: float = Field(default=0.0)
device_name: str = Field(default="Tracker")
vel_x: float = Field(default=0.0)
vel_y: float = Field(default=0.0)
vel_z: float = Field(default=0.0)
def __repr__(self):
return f"device name: {self.device_name} -> " \
f"x: {round(self.x, 5)} | y: {round(self.y, 5)} | z: {round(self.z, 5)} | " \
f"pitch: {round(self.pitch, 5)} | yaw: {round(self.yaw, 5)} | roll: {round(self.roll, 5)}"
def __str__(self):
return self.__repr__()
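# Minimal usage sketch (field values are illustrative): construct a message and
# print it; printing goes through the custom __repr__ defined above.
if __name__ == "__main__":
    msg = ViveTrackerMessage(valid=True, x=1.2345, y=0.5, z=-0.25, yaw=90.0, device_name="tracker_1")
    print(msg)
    print(msg.valid, msg.vel_x)  # fields not passed in fall back to their defaults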
|
# -*- coding: utf-8 -*-
import json
import re
from django.conf import settings
from django.utils import six
from rest_framework.parsers import JSONParser, ParseError
# Parser logic taken from vbabiy's djangorestframework-camel-case project
# https://github.com/vbabiy/djangorestframework-camel-case
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_to_underscore(name):
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def underscoreize(data):
if isinstance(data, dict):
new_dict = {}
for key, value in data.items():
if key in ['From', 'To', 'Cc', 'Bcc']:
key = key + 'Email'
new_key = camel_to_underscore(key)
new_dict[new_key] = underscoreize(value)
return new_dict
if isinstance(data, (list, tuple)):
for i in range(len(data)):
data[i] = underscoreize(data[i])
return data
return data
class PostmarkJSONParser(JSONParser):
"""
Append "Email" to the from/to/cc/bcc keys in Postmark's JSON request. We do this because Postmark's "From" key conflicts with the Python keyword of the same name and therefore we cannot use it as a model/serializer field. By using fields named from_email, to_email, etc we can match to the renamed keys from Postmark's JSON (FromEmail, ToEmail, etc)
"""
def parse(self, stream, media_type=None, parser_context=None):
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return underscoreize(json.loads(data))
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
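# Minimal usage sketch (the sample keys below are assumptions based on Postmark's
# camel-cased webhook fields; running this file directly also assumes Django and
# DRF are importable in the current environment):
if __name__ == "__main__":
    sample = {"From": "sender@example.com", "MessageStream": "inbound", "Cc": ""}
    print(underscoreize(sample))
    # expected: {'from_email': 'sender@example.com', 'message_stream': 'inbound', 'cc_email': ''}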
|
from keras.models import model_from_json
import cv2
import numpy as np
class GazeNN(object):
def __init__(self, json_model_file, h5_model_file):
json_file = open(json_model_file, 'r')
self.loaded_model_json = json_file.read()
json_file.close()
self.loaded_model = model_from_json(self.loaded_model_json)
self.loaded_model.load_weights(h5_model_file)
self.cap = cv2.VideoCapture(0)
self.face_cascade = cv2.CascadeClassifier(
'C:/Users/vasy1/AppData/Local/Continuum/anaconda3/pkgs/opencv-3.3.1-py36h20b85fd_1/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
self.loaded_model.predict(np.zeros((1, 50, 140, 1)))
def process_image(self):
ret, img = self.cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
# preprocess image
roi_gray = gray[int(y + 30):int(y + h / 1.9), x + 25:x + w - 25]
roi_gray = cv2.resize(roi_gray, (140, 50))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(5, 5))
cl1 = clahe.apply(roi_gray)
prezi = cv2.resize(cl1, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow('gaze', prezi)
key_press = cv2.waitKey(1)
# prepare input for model
image = np.asarray(roi_gray)
image.resize((1, 50, 140, 1))
image = image.astype(np.float32)
image /= np.max(image) # Normalise data to [0, 1] range
prediction = self.loaded_model.predict(image, batch_size=32, verbose=0)
return np.argmax(prediction), key_press
return -1, -1
|
class Solution(object):
def movesToChessboard(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
size = len(board)
half, raw_sum, col_sum = size >> 1, 0, 0
for i in range(size):
raw_sum += board[0][i]
col_sum += board[i][0]
if size & 1 == 0:
if raw_sum != half or col_sum != half:
return -1
elif raw_sum != half and raw_sum != half + 1:
return -1
elif col_sum != half and col_sum != half + 1:
return -1
for i in range(1, size):
for j in range(1, size):
if board[i][j] != board[0][0] ^ board[i][0] ^ board[0][j]:
return -1
step_raw, step_col = 0, 0
if size & 1 == 0:
raw_ref, col_ref = board[0][0], board[0][0]
else:
raw_ref = 0 if raw_sum == half else 1
col_ref = 0 if col_sum == half else 1
for i in range(0, size):
if board[0][i] != (i & 1) ^ raw_ref:
step_raw += 1
if board[i][0] != (i & 1) ^ col_ref:
step_col += 1
if size & 1 == 0:
step_raw = min(step_raw, size - step_raw)
step_col = min(step_col, size - step_col)
return (step_raw + step_col) // 2
print(Solution().movesToChessboard([[0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 0, 1]]))
print(Solution().movesToChessboard([[0, 1], [1, 0]]))
print(Solution().movesToChessboard([[1, 0], [1, 0]]))
print(Solution().movesToChessboard([[0, 0, 1], [1, 1, 0], [1, 1, 0]]))
print(Solution().movesToChessboard([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))
print(Solution().movesToChessboard([[0,0,1,0,1,1],[1,1,0,1,0,0],[1,1,0,1,0,0],[0,0,1,0,1,1],[1,1,0,1,0,0],[0,0,1,0,1,1]]))
# An N x N board contains only 0s and 1s. In each move, you can swap any 2 rows with each other, or any 2 columns with each other.
# What is the minimum number of moves to transform the board into a "chessboard" - a board where no 0s and no 1s are 4-directionally adjacent? If the task is impossible, return -1.
# Examples:
# Input: board = [[0,1,1,0],[0,1,1,0],[1,0,0,1],[1,0,0,1]]
# Output: 2
# Explanation:
# One potential sequence of moves is shown below, from left to right:
# 0110 1010 1010
# 0110 --> 1010 --> 0101
# 1001 0101 1010
# 1001 0101 0101
# The first move swaps the first and second column.
# The second move swaps the second and third row.
# Input: board = [[0, 1], [1, 0]]
# Output: 0
# Explanation:
# Also note that the board with 0 in the top left corner,
# 01
# 10
# is also a valid chessboard.
# Input: board = [[1, 0], [1, 0]]
# Output: -1
# Explanation:
# No matter what sequence of moves you make, you cannot end with a valid chessboard.
# Note:
# board will have the same number of rows and columns, a number in the range [2, 30].
# board[i][j] will be only 0s or 1s.
# Source: LeetCode (力扣)
# Link: https://leetcode-cn.com/problems/transform-to-chessboard
# Copyright belongs to LeetCode (领扣网络). Contact them for authorization before commercial reuse; credit the source for non-commercial reuse.
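# A quick standalone check (illustrative, not part of the original solution) of the
# XOR invariant used above: in any board reachable from a chessboard by row/column
# swaps, every cell satisfies board[i][j] == board[0][0] ^ board[i][0] ^ board[0][j].
_b = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]
assert all(_b[i][j] == _b[0][0] ^ _b[i][0] ^ _b[0][j] for i in range(3) for j in range(3))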
|
# Generated by Django 2.0.8 on 2018-08-27 09:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0026_auto_20190723_0929")]
operations = [
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION update_full_account_codes()
RETURNS TRIGGER AS
$$
BEGIN
-- Set empty string codes to be NULL
UPDATE hordak_account SET code = NULL where code = '';
-- Set full code to the combination of the parent account's codes
UPDATE
hordak_account AS a
SET
full_code = (
SELECT string_agg(code, '' order by lft)
FROM hordak_account AS a2
WHERE a2.lft <= a.lft AND a2.rght >= a.rght AND a.tree_id = a2.tree_id AND code IS NOT NULL
)
WHERE tree_id IN (SELECT DISTINCT tree_id FROM hordak_account WHERE code IS NOT NULL); -- search only account trees without null codes
-- Set full codes to NULL where a parent account includes a NULL code
UPDATE
hordak_account AS a
SET
full_code = NULL
WHERE
(
SELECT COUNT(*)
FROM hordak_account AS a2
WHERE a2.lft <= a.lft AND a2.rght >= a.rght AND a.tree_id = a2.tree_id AND a2.code IS NULL
) > 0
AND full_code IS NOT NULL; -- search only account trees without null codes
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
""",
"DROP FUNCTION update_full_account_codes()",
),
]
|
#! /usr/bin/env python3
# _*_ coding:utf-8 _*_
import re
import os
import time
import operator
import serial
import json
import serial.tools.list_ports
#from serial.serialutil import *
"""
operate on windows COM
"""
def test_com():
"""test windows COM recv and send data"""
port = []
plist = list(serial.tools.list_ports.comports())
for p in plist:
pcom = list(p)
device = pcom[0]
name = pcom[1]
info = pcom[2]
print('serial_device: ' + device)
print('serial_name: ' + name)
print('serial_info: ' + info)
if re.search(r'USB-to-Serial', name) is not None:
port.append(device)
print(port)
if len(port) <= 0:
print("not find windows serial port")
return False
loop_num = 100
if 1 == len(port):
serial_fd = serial.Serial(port[0], 9600, 8, 'N', 1, 10)
if serial_fd.is_open:
print('serial already open OK')
while loop_num > 0:
serial_fd.write("send".encode('utf-8'))
time.sleep(1)
data = serial_fd.read(7)
print(data)
loop_num -= 1
else:
print("COM open NOK")
global_msg_type = {'set_config':0xE1,
'get_config':0xE2,
'set_capk': 0xE3,
'get_capk': 0xE4,
'del_capk': 0xE5,
'get_poll_mode': 0xE6,
'reset': 0xE7,
'get_serial_num': 0xE8,
'get_fw_version': 0xE9,
'get_payment_version': 0xEA,
'get_vas_version': 0xEB,
'start_transaction': 0xEC,
'get_trans_status': 0xED,
'get_trans_result': 0xEE,
'cancel_transaction': 0xEF,
'get_trans_log': 0xF0,
'clear_trans_log': 0xF1,
'close': 0xF2
}
class RS232(object):
"""
RS232 class supply operation on COM
"""
def __init__(self, devName):
"""init COM some parameter"""
self.instance = None
self.port = devName
self.baud_rate = 9600
self.byte_size = 8
self.parity = 'N'
self.stop_bits = 1
self.serial_timeout = 10
self.recv_timeout = 5
self.com_open = False
        self.__msg_len = 4096 - 12  # max data chunk per write, leaving 12 bytes of room for the message header and info
port_list = list(serial.tools.list_ports.comports(True))
port = []
# loop all com in system
for p in port_list:
pcom = list(p)
device = pcom[0]
name = pcom[1]
if re.search(r'USB-to-Serial', name) is not None:
port.append(device)
# if only one serial com, set this default, otherwise, set devName
if 1 == len(port):
self.port = port[0]
try:
self.instance = serial.Serial(self.port, self.baud_rate,
self.byte_size, self.parity, self.stop_bits, self.serial_timeout)
            except serial.SerialException as e:  # qualified name: the serial.serialutil wildcard import above is commented out
print('exception occur: %s' % e)
else:
if self.instance is not None:
if self.instance.is_open:
self.com_open = True
print('create and open COM on ' + self.port)
else:
print("assigned port NOT matched in system")
def __calc_lrc(self, content):
lrc_value = 0
for i in content:
lrc_value ^= i
return lrc_value
def __check_lrc(self, content):
lrc_value = 0
for i in content[:-1]:
lrc_value ^= i
if lrc_value == content[-1]:
return True
else:
return False
def __check_msg(self, content):
""" ensure the content is entire POS command message"""
if content is None:
return False
if operator.lt(len(content), 13):
return False
        msg_len = content[4] * 256 + content[5]          # MSB/LSB of the message length field
        if msg_len == len(content[6:-2]):
            data_len = content[11] * 256 + content[12]   # MSB/LSB of the data length field
            if data_len == len(content[13:-2]):
                return self.__check_lrc(content[1:])
return False
def __get_data(self, content):
data = content[13:-2]
return data
def __create_cmd(self, msg_type, content):
""" construct POS cmd msg"""
cmd = bytearray()
content_len = 0
cmd.append(0x02) #STX
# Message Header
cmd.append(0x00) #DID, Destination ID
cmd.append(0x00) #SID, SourceID
cmd.append(0x00) #MTI, Message Type Indicator
if content is not None:
content_len = len(content) + 6 # add ETX & LRC
else:
content_len = 6 # add ETX & LRC
msg_len = int(content_len//256)
cmd.extend(msg_len.to_bytes(1, byteorder='big')) #MSB Len
msg_len = int(content_len % 256)
cmd.extend(msg_len.to_bytes(1, byteorder='big')) #LSB Len
# Message Info
cmd.append(0x01) # Type, 01-command, 02-response
cmd.append(msg_type) # ID
cmd.append(0x00) # P1
cmd.append(0x00) # P2
if content is not None:
content_len = len(content)
else:
content_len = 0 # add ETX & LRC
msg_len = int(content_len // 256)
cmd.extend(msg_len.to_bytes(1, byteorder='big')) # MSB Len
msg_len = int(content_len % 256)
cmd.extend(msg_len.to_bytes(1, byteorder='big')) # LSB Len
if content is not None:
cmd.extend(content)
cmd.append(0x03) # ETX
msg_len = int(self.__calc_lrc(cmd[1:]))
cmd.extend(msg_len.to_bytes(1, byteorder='big'))
print(cmd)
return cmd
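    # Worked frame example (illustrative, not taken from the original source): for
    # msg_type 0xE8 (get_serial_num) with no content, __create_cmd above produces
    #   02 | 00 00 00 | 00 06 | 01 E8 00 00 | 00 00 | 03 | EC
    #   STX  DID/SID/MTI  msg len  type/id/P1/P2  data len  ETX  LRC (XOR of all bytes after STX)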
def __send_data(self, msg_type, content):
""" send request command message """
if self.instance is None:
return False
send_data = self.__create_cmd(msg_type, content)
left_len = len(send_data)
index = 0
max_send_len = 0
while 0 < left_len:
# send data
if self.__msg_len < left_len:
max_send_len = self.__msg_len
else:
max_send_len = left_len
try:
send_len = self.instance.write(send_data[index:index+max_send_len])
except TypeError as e:
print(e)
break
else:
index += send_len
left_len -= send_len
# check send result
if 0 == left_len:
return True
else:
return False
def __recv_data(self, timeout):
""" receive com message from terminal """
data = bytearray()
if self.instance is None:
return bytes()
        since = time.time()
        # poll until at least one byte has arrived or the timeout expires
        while True:
            while 0 < self.instance.inWaiting():
                data.extend(self.instance.read())
            if 0 < len(data):
                break
            if operator.le(timeout, 0) or (time.time() > since + timeout):
                break
            time.sleep(0.05)
        return bytes(data)
def __check_recv_response(self):
response_data = bytearray()
data = self.__recv_data(self.recv_timeout)
if operator.eq(len(data), 0):
return bytes()
if self.__check_msg(data):
response_data.extend(self.__get_data(data)) # error code
if operator.le(len(response_data), 2):
print("recv msg Invalid")
return bytes()
else:
print('recv msg OK')
return bytes(response_data)
else:
print("recv msg Invalid")
return bytes()
def __clear_cache(self):
if self.instance is not None:
self.instance.flushInput()
self.instance.flushOutput()
def setConfig(self, reader_config_object):
""" set static reader configuration """
reader_config = bytes(json.dumps(reader_config_object), "utf-8")
self.__clear_cache()
        if self.__send_data(global_msg_type['set_config'], reader_config):
            print('send request msg[%d] OK' % len(reader_config))
            return True
        print('send request msg NOK, stop')
        return False
def getConfig(self):
""" get current config values """
reader_config = {}
self.__clear_cache()
if self.__send_data(global_msg_type['get_config'], None):
data = self.__check_recv_response()
if operator.gt(len(data), 0):
reader_config = json.loads(self.__get_data(data))
return reader_config
#@property
def getCAPK(self):
""" CAPK info """
capk_tuple = ()
self.__clear_cache()
if self.__send_data(global_msg_type['get_capk'], None):
data = self.__check_recv_response()
if operator.gt(len(data), 0):
capk_config = json.loads(self.__get_data(data))
if isinstance(capk_config, dict):
                    for x in capk_config.values():  # iterate the CAPK entries themselves, not their index numbers
                        if isinstance(x, dict):
                            tmp_tuple = tuple(x)
                            capk_tuple += tmp_tuple
return capk_tuple
def setCAPK(self, CAPK_object):
""" set CAPK """
capk_config = bytes(json.dumps(CAPK_object), "utf-8")
self.__clear_cache()
        if self.__send_data(global_msg_type['set_capk'], capk_config):
            print('send request msg[%d] OK' % len(capk_config))
            return True
        print('send request msg NOK, stop')
        return False
def deleteCAPK(self, CAPK_object):
""" delete CAPK """
capk_config = bytes(json.dumps(CAPK_object), "utf-8")
self.__clear_cache()
        if self.__send_data(global_msg_type['del_capk'], capk_config):
            print('send request msg[%d] OK' % len(capk_config))
            return True
        print('send request msg NOK, stop')
        return False
def getPollingModes(self):
""" get supported polling modes """
polling_mode = bytearray()
self.__clear_cache()
if self.__send_data(global_msg_type['get_poll_mode'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
polling_mode.extend(self.__get_data(data))
return polling_mode.decode('utf-8')
def reset(self):
""" reset or reboot terminal """
self.__clear_cache()
if self.__send_data(global_msg_type['reset'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
return True
else:
print("recv msg Invalid")
return False
else:
print('send request msg NOK, stop')
return False
def getSerialNumber(self):
""" get terminal serial number """
serial_number = bytearray()
self.__clear_cache()
if self.__send_data(global_msg_type['get_serial_num'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
serial_number.extend(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return serial_number.decode('utf-8')
def getFWVersion(self):
""" FW version installed on terminal """
fw_version = bytearray()
self.__clear_cache()
if self.__send_data(global_msg_type['get_fw_version'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
fw_version.extend(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return fw_version.decode('utf-8')
def getPaymentAppletVersion(self):
""" return payment applet version installed on reader """
payment_version = bytearray()
self.__clear_cache()
if self.__send_data(global_msg_type['get_payment_version'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
payment_version.extend(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return payment_version.decode('utf-8')
def getVASAppletVersion(self):
""" return VAS applet version installed on reader """
vas_version = bytearray()
self.__clear_cache()
if self.__send_data(global_msg_type['get_vas_version'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
vas_version.extend(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return vas_version.decode('utf-8')
def startTransaction(self, transaction_start_object):
""" put terminal in vas or payment mode """
trans_config = bytes(json.dumps(transaction_start_object), "utf-8")
self.__clear_cache()
        if self.__send_data(global_msg_type['start_transaction'], trans_config):
            print('send request msg[%d] OK' % len(trans_config))
            return True
        print('send request msg NOK, stop')
        return False
def getTransactionStatus(self):
""" get transaction status inprogress, complete, or errorcode """
trans_status = {}
self.__clear_cache()
if self.__send_data(global_msg_type['get_trans_status'], None):
data = self.__check_recv_response()
if operator.gt(len(data), 0):
trans_status = json.loads(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return trans_status
def getTransactionResult(self):
""" get transaction status which consist of vas or payment status """
trans_result = {}
self.__clear_cache()
if self.__send_data(global_msg_type['get_trans_result'], None):
data = self.__check_recv_response()
if operator.gt(len(data), 0):
trans_result = json.loads(self.__get_data(data))
else:
print("recv msg Invalid")
else:
print('send request msg NOK, stop')
return trans_result
def cancelTransaction(self):
""" cancel current transaction or vas mode and return to idle mode """
if self.__send_data(global_msg_type['cancel_transaction'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
return True
else:
return False
else:
print('send request msg NOK, stop')
return False
def getTransactionLog(self):
""" return logObject which consist of a tuple of byte strings for each apdu
logged duringvas or/ and payment transaction """
data = bytearray()
tmp_data = bytearray()
trans_log = {}
trans_log_flag = 0
self.__clear_cache()
if self.__send_data(global_msg_type['get_trans_log'], None):
since = time.time()
while True:
if 0 < self.instance.inWaiting():
data.extend(self.instance.read())
if operator.ge(len(data), 3):
if operator.eq(data[-3], 0xFF):
break
if operator.gt(time.time() - since, 3):
if operator.eq(len(data), 0): #
break
continue
if self.__check_msg(data):
tmp_data = self.__get_data(data)
                if operator.gt(len(tmp_data), 1):  # more than just the error code byte
                    trans_log = (bytes(tmp_data[0:-1]), bytes(tmp_data[-1:]))  # (log payload, trailing status byte)
return trans_log
def clearTransactionLog(self):
""" close log file for next logging session """
if self.__send_data(global_msg_type['clear_trans_log'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
return True
else:
return False
else:
print('send request msg NOK, stop')
return False
def close(self):
""" close COM connection """
self.__clear_cache()
if self.__send_data(global_msg_type['close'], None):
# recv response
data = self.__check_recv_response()
if operator.gt(len(data), 0):
if self.com_open:
self.instance.close()
self.com_open = False
print('close connection on COM ' + self.port)
else:
print('connection on COM already closed')
return True
else:
return False
else:
print('send request msg NOK, stop')
return False
"""
reader_config_object
{
"RestoreDefault":h,
"TransactionLogging":boolean,
"TransactionLogSize":h,
"TransCurrencyCode":h,
"TransCurrencyExponent":h,
"TransType":h,
"AmountAuthorized":h,
"AmountOther":h,
"TerminalCountryCode":h,
"TerminalFloorLimit":h,
"TerminalCapabilities":h,
"TerminalType":h,
"AdditionalTermCapabilities":h,
"TerminalCTLSFloorLimit":h,
"VisaTTQ":h,
"Timeout":time value in ms,
"mChipMobileSupport":h,
"ExpressPayTerminalCapabilities":h,
"TerminalCTLSTransLimit":h,
"CVMRequiredLimit":h,
"TerminalActionCodeOnline":h,
"TerminalActionCodeDefault":h,
"TerminalActionCodeDenial":h,
"pollingMode":h,
"pollingTechnology":h,
…
    "VAS": {
'Merchants' : [
{
"merchantID" : string
"url" : string
"filter" : byte string
},
…
]
}
}
transaction_start_object
{
"VASTerminalMode":h,
"ProtocolMode":h,
"AmountAuthorized":h,
"Timeout":time value in ms
"TransDate":h,
"TransTime":time value in HHMMSS,
"TransType":h,
"TransCurrencyCode":h,
"TerminalCountryCode":h,
"AccountType":int,
"AmountOther":h,
}
vas_result_object
{
"token" : h
"data' : h
"merchantID" : string
"result code" : int
}
CAPK_object
{
"RID":h,
"CAPKIndex":h,
"CAHashAlgoIndicator":h,
"CAPKAlgoIndicator":h,
"CAPKModulus":h,
"CAPKExponent":h,
"CAPKCheckSum":h,
}
transaction_result_object
{
"VASResults" : [
(one for each VAS request in transaction)
vas_result_object_1,
vas_result_object_2,
…
vas_result_object_n
]
"PaymentResult" : {
"rawdata" : h,
"track 1": h,
"track 2": h,
… some basic values
}
}
transaction_status_object
{
transactionStatus:h,
VAS and Payment specs
}
logObject
{
getLog:string,
clearLog:boolean,
}
server_response_object
{
"Online Authorization/Results":string,
}
"""
def test_interface():
#test data
reader_config_object = {
"RestoreResult": 12345678,
"TransactionLogging": True,
"TransactionLogSize": 1024,
"VAS": {
'Merchant': [{
'merchantID': "D48EF64464F332DB2E97CD7DEDEE17E82E92086B23027F4FE777A244BE536F16",
'url': "https://test.pass.mcr.com",
'filter': "5682"
},
{
'merchantID': "3F22543BAF0AC5E4ABFC25681A6EBF6EDF5AC196746C55F4D4370819FFF921C3",
'url': "",
'filter': ""
}]
}
}
CAPK_object = {}
transaction_start_object = {
"VASTerminalMode": 0x01,
"ProtocolMode": 0x00
}
print('********************************')
print('* 0 -> exit ')
print('* 1 -> setConfig ')
print('* 2 -> getConfig ')
print('* 3 -> setCAPK ')
print('* 4 -> getCAPK ')
print('* 5 -> deleteCAPK ')
print('* 6 -> getPollingModes ')
print('* 7 -> reset ')
print('* 8 -> getSerialNumber ')
print('* 9 -> getFWVersion ')
print('* A -> getPaymentAppletVersion ')
print('* B -> getVASAppletVersion ')
print('* C -> startTransaction ')
print('* D -> getTransactionStatus ')
print('* E -> getTransactionResult ')
print('* F -> cancelTransaction ')
print('* 10 -> getTransactionLog ')
print('* 11 -> clearTransactionLog ')
print('* 12 -> close ')
print('********************************')
rs232 = RS232('COM4')
while True:
print(' ')
index = input('plz choose case: ')
# setconfig
if operator.eq(index, '1'):
if rs232.setConfig(reader_config_object):
print('set config OK')
else:
print('set config NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
# getconfig
if operator.eq(index, '2'):
if rs232.setConfig(reader_config_object):
print('set config OK')
else:
print('set config NOK')
if rs232.getConfig():
print('get config OK')
else:
print('get config NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
# setCAPK
if operator.eq(index, '3'):
if rs232.setCAPK(CAPK_object):
print('set CAPK OK')
else:
print('set CAPK NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
# getCAPK
if operator.eq(index, '4'):
if rs232.getCAPK():
print('get CAPK OK')
else:
print('get CAPK NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
# deleteCAPK
if operator.eq(index, '5'):
if rs232.deleteCAPK(CAPK_object):
print('delete CAPK OK')
else:
print('delete CAPK NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
# getPollingModes
if operator.eq(index, '6'):
if rs232.getPollingModes():
print('get polling Modes OK')
else:
print('get polling Modes NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '7'):
if rs232.reset():
print('reset OK')
else:
print('reset NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '8'):
if rs232.getSerialNumber():
print('get SerialNumber OK')
else:
print('get SerialNumber NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '9'):
if rs232.getFWVersion():
print('get FWVersion OK')
else:
print('get FWVersion NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'A') or operator.eq(index, 'a'):
if rs232.getPaymentAppletVersion():
print('get PaymentAppletVersion OK')
else:
print('get PaymentAppletVersion NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'B') or operator.eq(index, 'b'):
if rs232.getVASAppletVersion():
print('get VASAppletVersion OK')
else:
print('get VASAppletVersion NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'C') or operator.eq(index, 'c'):
if rs232.startTransaction(transaction_start_object):
print('start Transaction OK')
else:
print('start Transaction NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'D') or operator.eq(index, 'd'):
if rs232.getTransactionStatus():
print('get Transaction Status OK')
else:
print('get Transaction Status NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'E') or operator.eq(index, 'e'):
if rs232.getTransactionResult():
print('get Transaction Result OK')
else:
print('get Transaction Result NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, 'F') or operator.eq(index, 'f'):
if rs232.cancelTransaction():
print('cancel Transaction OK')
else:
print('cancel Transaction NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '10'):
if rs232.getTransactionLog():
print('get TransactionLog OK')
else:
print('get TransactionLog NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '11'):
if rs232.clearTransactionLog():
print('clear TransactionLog OK')
else:
print('clear TransactionLog NOK')
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '12'):
if rs232.close():
print('close OK')
else:
print('close NOK')
if operator.eq(index, '0'):
break
def check_modules(modules):
#installed_modules = sys.modules.keys()
#installed_modules = os.spawn("pip freeze")
#fin, fout = popen2.popen2("sort")
installed_modules = os.popen("pip freeze").read()
for chk_mod in modules:
m = re.search(chk_mod, installed_modules, 0)
if m is not None:
print(chk_mod + ' module already installed')
else:
os.system('pip install ' + chk_mod)
if __name__ == "__main__":
#test_com()
check_modules(['pyserial',])
test_interface()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-03 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SpeakerWordCounts',
fields=[
('bioguide_id', models.CharField(max_length=7, primary_key=True, serialize=False)),
('crec_id', models.CharField(max_length=64)),
('date', models.DateField()),
('named_entities', models.TextField()),
('noun_chunks', models.TextField()),
],
),
]
|
#Adapted from https://github.com/hpcaitech/ColossalAI-Examples/blob/main/language/gpt/model/gpt1d.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from colossalai.utils.activation_checkpoint import checkpoint
import torch
from torch import nn as nn, Tensor
from colossalai.core import global_context as gpc
from colossalai.nn.layer.utils import divide, ACT2FN
from colossalai.utils import checkpoint
from colossalai.nn.layer import Linear1D_Col, Linear1D_Row
from colossalai.nn.layer.base_layer import ParallelLayer
from colossalai import kernel
from colossalai.kernel.cuda_native.scaled_softmax import AttnMaskType
from colossalai import nn as col_nn
class MLP1D(ParallelLayer):
def __init__(self,
in_features: int,
mlp_ratio: float,
act_func: str = 'gelu',
dropout_prob: float = 0.,
dtype=None,
checkpoint: bool = False,
skip_bias_add: bool = False,
):
super().__init__()
self.in_features = in_features
self.mlp_ratio = mlp_ratio
self.checkpoint = checkpoint
self.skip_bias_add = skip_bias_add
self.act = ACT2FN[act_func]
skip_dense_1_add_bias = False
# Project to mlp_ratio * h.
self.dense_1 = Linear1D_Col(
self.in_features,
int(self.mlp_ratio * self.in_features),
dtype=dtype,
gather_output=False,
skip_bias_add=skip_dense_1_add_bias,
)
# Project back to h.
self.dense_2 = Linear1D_Row(
int(self.mlp_ratio * self.in_features),
self.in_features,
dtype=dtype,
parallel_input=True,
)
self.dropout = col_nn.Dropout(dropout_prob)
def _forward(self, hidden_states: Tensor) -> Tensor:
intermediate_output = self.dense_1(hidden_states)
intermediate_output = self.act(intermediate_output)
output = self.dense_2(intermediate_output)
output = self.dropout(output)
return output
def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
return checkpoint(self._forward, hidden_states)
def forward(self, hidden_states: Tensor) -> Tensor:
if self.checkpoint:
return self._checkpoint_forward(hidden_states)
else:
return self._forward(hidden_states)
class GenericSelfAttention1D(ParallelLayer):
def __init__(self,
hidden_size: int,
num_attention_heads: int,
attention_dropout_prob: float,
hidden_dropout_prob: float,
dtype=None,
checkpoint: bool = False,
max_position_embeddings=1024,
):
super().__init__()
self.hidden_size = hidden_size
self.attention_head_size = divide(hidden_size, num_attention_heads)
self.num_attention_heads_per_partition = divide(num_attention_heads, gpc.tensor_parallel_size)
self.hidden_size_per_partition = divide(hidden_size, gpc.tensor_parallel_size)
self.checkpoint = checkpoint
self.query_key_value = Linear1D_Col(
hidden_size,
3 * hidden_size,
dtype=dtype,
)
self.attention_dropout = col_nn.Dropout(attention_dropout_prob)
self.dense = Linear1D_Row(
hidden_size,
hidden_size,
dtype=dtype,
parallel_input=True,
)
self.dropout = col_nn.Dropout(hidden_dropout_prob)
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
raise NotImplementedError
def _forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
query_key_value = self.query_key_value(hidden_states)
new_qkv_shape = query_key_value.shape[:-1] + \
(self.num_attention_heads_per_partition, 3 * self.attention_head_size)
query_key_value = query_key_value.view(new_qkv_shape)
query_key_value = query_key_value.permute((0, 2, 1, 3))
query_layer, key_layer, value_layer = torch.chunk(
query_key_value, 3, dim=-1)
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
attention_scores = self.softmax_forward(attention_scores, attention_mask, query_layer, key_layer)
attention_scores = attention_scores.type(value_layer.dtype)
attention_probs = self.attention_dropout(attention_scores)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(1, 2)
new_context_layer_shape = context_layer.size()[
:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
output = self.dropout(output)
return output
def _checkpoint_forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
return checkpoint(self._forward, hidden_states, attention_mask)
def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
if self.checkpoint:
return self._checkpoint_forward(hidden_states, attention_mask)
else:
return self._forward(hidden_states, attention_mask)
class DeepNetSelfAttention1D(GenericSelfAttention1D):
def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float, hidden_dropout_prob: float, dtype=None, checkpoint: bool = False, max_position_embeddings=1024):
super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
self.softmax = nn.Softmax(dim=-1)
max_positions = max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# causal mask
query_length, key_length = query_layer.size(-2), key_layer.size(-2)
causal_mask = self.bias[:, :, key_length - query_length: key_length, :key_length].bool()
attention_scores = torch.where(causal_mask, attention_scores, self.masked_bias.to(attention_scores))
if attention_mask is not None:
# Apply the attention mask
attention_scores = attention_scores + attention_mask
attention_scores = self.softmax(attention_scores)
return attention_scores
class FusedDeepNetSelfAttention1D(GenericSelfAttention1D):
def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float, hidden_dropout_prob: float, dtype=None, checkpoint: bool = False, max_position_embeddings=1024):
super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
self.softmax = kernel.FusedScaleMaskSoftmax(input_in_fp16=True,
input_in_bf16=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=True,
mask_func=None,
softmax_in_fp32=True,
scale=math.sqrt(self.attention_head_size))
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
return self.softmax(attention_scores, attention_mask)
class GenericDeepNetTransformerLayer1D(ParallelLayer):
def __init__(self,
hidden_size: int,
num_attention_heads: int,
act_func: str = 'gelu',
mlp_ratio: float = 4.0,
attention_dropout_prob: float = 0.,
hidden_dropout_prob: float = 0.,
dtype=None,
checkpoint: bool = False,
max_position_embeddings: int = 1024,
alpha: float = 1.0,
layer_norm_epsilon: float = 1e-5,
apply_post_layer_norm: bool = False,
attention=None,
layer_norm=None
):
super().__init__()
self.checkpoint = checkpoint
self.dtype = dtype
self.norm1 = layer_norm(hidden_size, eps=layer_norm_epsilon)
self.attention = attention(
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
attention_dropout_prob=attention_dropout_prob,
hidden_dropout_prob=hidden_dropout_prob,
dtype=dtype,
max_position_embeddings=max_position_embeddings,
checkpoint=False,
)
self.alpha = alpha
self.norm2 = layer_norm(hidden_size, eps=layer_norm_epsilon)
self.mlp = MLP1D(
in_features=hidden_size,
dropout_prob=hidden_dropout_prob,
act_func=act_func,
mlp_ratio=mlp_ratio,
dtype=dtype,
checkpoint=False,
)
def _forward(self, hidden_states, attention_mask) -> Tensor:
residual = hidden_states
attention_output = self.attention(hidden_states, attention_mask)
hidden_states = self.norm1(residual*self.alpha + attention_output)
residual = hidden_states
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = self.norm2(residual*self.alpha + feed_forward_hidden_states)
output = (hidden_states, attention_mask)
return output
def forward(self, hidden_states, attention_mask):
if self.checkpoint:
return checkpoint(self._forward, hidden_states, attention_mask)
else:
return self._forward(hidden_states, attention_mask)
class DeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
attention = DeepNetSelfAttention1D
layer_norm = nn.LayerNorm
super().__init__(hidden_size, num_attention_heads, act_func=act_func, mlp_ratio=mlp_ratio, attention_dropout_prob=attention_dropout_prob, hidden_dropout_prob=hidden_dropout_prob, dtype=dtype,
checkpoint=checkpoint, max_position_embeddings=max_position_embeddings, layer_norm_epsilon=layer_norm_epsilon, alpha=alpha, attention=attention, layer_norm=layer_norm)
class FusedDeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
attention = FusedDeepNetSelfAttention1D
layer_norm = kernel.LayerNorm
super().__init__(hidden_size, num_attention_heads, act_func=act_func, mlp_ratio=mlp_ratio, attention_dropout_prob=attention_dropout_prob, hidden_dropout_prob=hidden_dropout_prob, dtype=dtype,
checkpoint=checkpoint, max_position_embeddings=max_position_embeddings, layer_norm_epsilon=layer_norm_epsilon, alpha=alpha, attention=attention, layer_norm=layer_norm)
|
from pathlib import Path
import pandas as pd
from typer import secho
import os
from shutil import move
# from os import path
def read_list(list_path:Path):
if str(list_path).endswith('.csv'):
list = pd.read_csv(list_path, sep=';')
from_col = []
to_col = []
for i in list['from']:
from_col.append(i)
for i in list['to']:
to_col.append(i)
list = {}
for i in range(len(from_col)):
list[from_col[i]] = to_col[i]
return list
    if str(list_path).endswith('.txt'):
        with open(list_path, encoding="utf-8") as f:
            list = {}
            for i in f.readlines():
                pair = i.rstrip('\n').split(';')  # strip the newline so the target name stays clean
                list[pair[0]] = pair[1]
        return list
else:
        secho('The list file format is not supported')
def move_files(path:Path, create_folders:bool):
for dir,cp,files in os.walk(path):
for file in files:
# print(dir)
folder_name = os.path.join(dir,str(file[:str(file).rfind('.')]))
# print(folder_name)
os.makedirs(folder_name, exist_ok=True)
if create_folders:
create_folders_inside(path)
move(os.path.join(dir,file),folder_name)
# read_list('./list.csv')
def create_folders_inside(path):
# print(path)
for dir,cp,files in os.walk(path):
# folder_name = os.path.join(dir,str(file[:str(file).rfind('.')]))
# print(dir)
if str(dir) == str(path):
# print("True")
continue
for i in ['metadata', 'access', 'preservation', 'service', 'submissionDocumentation']:
# print(os.path.join(dir, i))
os.makedirs(os.path.join(dir, i), exist_ok=True)
|
from delivery.extensions.auth.controller import create_user, save_user_photo
from delivery.extensions.auth.form import UserForm
from flask import Blueprint, redirect, render_template, request
bp = Blueprint("site", __name__)
@bp.route("/")
def index():
return render_template("index.html")
@bp.route("/about")
def about():
return render_template("about.html")
@bp.route("/restaurants")
def restaurants():
return render_template("restaurants.html")
@bp.route("/signup", methods=["POST", "GET"])
def signup():
form = UserForm()
if form.validate_on_submit():
create_user(
email=form.email.data,
password=form.password.data,
name=form.name.data,
)
photo = request.files.get("photo")
if photo:
save_user_photo(photo.filename, photo)
return redirect("/")
return render_template("userform.html", form=form)
|
import tensorflow as tf
def vgg_block(num_convs, num_channels):
blk = tf.keras.models.Sequential()
for _ in range(num_convs):
        # strides defaults to (1, 1)
blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3,
padding='same',activation='relu'))
blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
return blk
def build_vggnet(conv_arch):
net = tf.keras.models.Sequential()
for (num_convs, num_channels) in conv_arch:
net.add(vgg_block(num_convs,num_channels))
net.add(tf.keras.models.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4096,activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(4096,activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10,activation='softmax')]))
return net
def build_vgg(keyword='vgg11'):
    if keyword == 'vgg11':
        conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))  # VGG11
    elif keyword == 'vgg16':
        conv_arch = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))  # VGG16
    elif keyword == 'vgg19':
        conv_arch = ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512))  # VGG19
    else:
        raise ValueError(f"unknown VGG variant: {keyword}")
net = build_vggnet(conv_arch)
return net
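# Minimal usage sketch (input resolution and channel count are illustrative
# assumptions): build the VGG-11 variant and push a dummy batch through it.
if __name__ == "__main__":
    model = build_vgg('vgg11')
    dummy = tf.random.normal((2, 224, 224, 1))
    out = model(dummy)
    print(out.shape)  # expected: (2, 10) after five conv blocks and the dense head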
|
# Copyright 2014-2015 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Bitwise rotation of a payload
"""
from stoq.plugins import StoqDecoderPlugin
class BitwiseRotateDecoder(StoqDecoderPlugin):
def __init__(self):
super().__init__()
def activate(self, stoq):
self.stoq = stoq
super().activate()
def decode(self, payload, **kwargs):
"""
Bitwise rotation of a payload
:param bytes payload: Payload to be decoded
:param **kwargs direction: left or right rotation of bits. Defaults
to right.
:param **kwargs bits: Defaults to 4 for nibble swapping.
Valid range is 0 - 8.
:returns: Bitwise rotated payload
:rtype: list of tuples
"""
try:
if 'bits' in kwargs:
bits = kwargs['bits']
else:
bits = 4
if 'direction' in kwargs:
direction = kwargs['direction'].lower()
else:
direction = 'right'
# Ensure rotation value is between 0 and 8
if (bits < 0) or (bits > 8):
                raise ValueError('Rotation out of bounds (0-8)')
payload = self.to_bytearray(payload)
payload_length = len(payload)
for index in range(payload_length):
byte_value = payload[index]
if direction == 'left':
payload[index] = (byte_value << bits | byte_value >> (8 - bits)) & 0xFF
else:
payload[index] = (byte_value >> bits | byte_value << (8 - bits)) & 0xFF
# Define the metadata we want to return
meta = {}
meta['size'] = payload_length
meta['bits'] = bits
meta['direction'] = direction
# Return the results as a list of tuples
return [(meta, payload)]
except Exception as err:
self.log.error("Unable to bitwise rotate payload: {}".format(str(err)))
return None
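# Standalone check of the rotation arithmetic above (illustrative; running this file
# directly still requires the stoq import at the top to resolve): rotating a byte by
# 4 bits in either direction swaps its nibbles.
if __name__ == "__main__":
    byte_value, bits = 0xAB, 4
    rot_right = (byte_value >> bits | byte_value << (8 - bits)) & 0xFF
    rot_left = (byte_value << bits | byte_value >> (8 - bits)) & 0xFF
    print(hex(rot_right), hex(rot_left))  # both 0xba for a 4-bit rotation of 0xab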
|
import torch
from ..utils.manifold_multi import multiprod, multitransp, multisym
from .manifold import Manifold
class Hyperbolic(Manifold):
"""
Class for Hyperbolic manifold with shape (k x N) or N
With k > 1 it applies product of k Hyperbolas
"""
def __init__(self, n, k=1):
if n < 2:
raise ValueError("Need n >= 2 Value supplied was n = {}".format(n))
if k < 1:
raise ValueError("Need k >= 1 Value supplied was k = {}".format(k))
        super(Hyperbolic, self).__init__()
# Set the dimensions of the Hyperbolic manifold
self._n = n
self._k = k
self.eps = 0
# Set dimension #TODO: confirm
self._dim = self._k * (self._n)
if k == 1:
self._size = torch.Size((n,))
else:
self._size = torch.Size((k, n))
def __str__(self):
if self._k == 1:
return "Hyperbolic manifold ({})".format(self._n)
elif self._k >= 2:
return "Product Hyperbolic manifold ({})^{}".format(
self._n, self._k)
def rand(self):
"""
Generate random Hyperbolic point in range (-0.001, 0.001)
"""
u_range = (-0.001, 0.001)
if self._k == 1:
X = torch.randn(self._n)# * (u_range[1] - u_range[0]) + u_range[0]
X[0] = torch.sqrt(1 + torch.sum(X[1:]**2))
return X
X = torch.randn(self._k, self._n)# * (u_range[1] - u_range[0]) + u_range[0]
X[:, 0] = torch.sqrt(1 + torch.sum(X[:, 1:]**2, dim=1))
return X
def _lorentz_scalar_product(self, u, v):
if u.shape == v.shape:
if len(v.shape) == 1:
val = torch.sum(u*v) - 2*u[0]*v[0]
return val
elif len(v.shape) == 2:
val = torch.sum(u*v, dim=1) - 2*u[:, 0]*v[:, 0]
return val
raise ValueError("u, v can not be {}-dimensional".format(len(v.shape)))
raise ValueError("u,v shape should be same")
def proj(self, X, U):
if self._k == 1:
return U + self._lorentz_scalar_product(X, U) * X
else:
return U + self._lorentz_scalar_product(X, U).reshape(self._k, -1) * X
def egrad2rgrad(self, X, U):
gl = torch.diag(torch.ones(self._n))
gl[0, 0] = -1
if self._k == 1:
return self.proj(X, multiprod(gl, U))
else:
return self.proj(X, multitransp(multiprod(gl, multitransp(U))))
def retr(self, X, G):
"""
        Retraction is the same as exp.
"""
return self.exp(X, G)
def exp(self, X, G):
        # check for multiple dimensions
G_lnorm = self.norm(X, G)
if self._k == 1:
ex = torch.cosh(G_lnorm) * X + torch.sinh(G_lnorm) * (G/G_lnorm)
if G_lnorm == 0:
ex = X
return ex
else:
G_lnorm = G_lnorm.view(-1, 1)
ex = torch.cosh(G_lnorm) * X + torch.sinh(G_lnorm) * (G/G_lnorm)
exclude = G_lnorm == 0
exclude = exclude.view(-1)
ex[exclude, :] = X[exclude, :]
return ex
def inner(self, X, G1, G2):
return torch.sum(G1 * G2)
def norm(self, X, G):
linear_product = self._lorentz_scalar_product(G, G)
return torch.sqrt(torch.max(linear_product,
torch.ones_like(linear_product) * self.eps))
def transp(self, x1, x2, d):
return self.proj(x2, d)
def _acosh(self, x):
return torch.log(x+(x**2-1)**0.5)
def dist(self, X, Y):
# arccosh(max (1, -<X,Y>_L) )
linear_product = -1 * self._lorentz_scalar_product(X, Y)
return self._acosh(torch.max(linear_product, torch.ones_like(linear_product)))
# TODO: inner, norm transp check
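# Usage sketch (added comment only; this module uses relative imports, so the lines
# below are illustrative rather than directly runnable here):
#
#     man = Hyperbolic(n=3, k=2)          # product of two hyperboloids embedded in R^3
#     X = man.rand()                      # X[:, 0] = sqrt(1 + sum(X[:, 1:]**2, dim=1))
#     man._lorentz_scalar_product(X, X)   # ~ -1 for every row, i.e. X lies on the hyperboloid
#     U = man.proj(X, torch.randn(2, 3))  # project an ambient vector onto the tangent space at X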
|
"""
Small WSGI filter that interprets headers added by reverse proxies to fix values in the WSGI environ (remote address, host and URL scheme).
"""
import re
from typing import Callable, Dict, Any
SEP_RE = re.compile(r', *')
class Filter:
def __init__(self, application: Callable[[Dict[str, str], Any], Any]):
self._application = application
def __call__(self, environ: Dict[str, str], start_response: Any) -> Any:
if 'HTTP_FORWARDED' in environ:
forwarded = SEP_RE.split(environ.pop('HTTP_FORWARDED'))[0]
fields = dict(tuple(f.split('=', maxsplit=1)) for f in forwarded.split(";")) # type: ignore
if 'for' in fields:
environ['REMOTE_ADDR'] = fields['for']
if 'host' in fields:
environ['HTTP_HOST'] = fields['host']
if 'proto' in fields:
environ['wsgi.url_scheme'] = fields['proto']
# the rest is taken from paste.deploy.config.PrefixMiddleware
if 'HTTP_X_FORWARDED_SERVER' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'] = \
environ.pop('HTTP_X_FORWARDED_SERVER').split(',')[0]
if 'HTTP_X_FORWARDED_HOST' in environ:
environ['HTTP_HOST'] = environ.pop('HTTP_X_FORWARDED_HOST').split(',')[0]
if 'HTTP_X_FORWARDED_FOR' in environ:
environ['REMOTE_ADDR'] = environ.pop('HTTP_X_FORWARDED_FOR').split(',')[0]
if 'HTTP_X_FORWARDED_SCHEME' in environ:
environ['wsgi.url_scheme'] = environ.pop('HTTP_X_FORWARDED_SCHEME')
elif 'HTTP_X_FORWARDED_PROTO' in environ:
environ['wsgi.url_scheme'] = environ.pop('HTTP_X_FORWARDED_PROTO')
return self._application(environ, start_response)
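# Minimal self-test sketch (added): wraps a trivial WSGI app and shows how an
# X-Forwarded-For header rewrites REMOTE_ADDR. The sample addresses are made up.
if __name__ == '__main__':
    def _demo_app(environ: Dict[str, str], start_response: Any) -> Any:
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['REMOTE_ADDR'].encode()]

    wrapped = Filter(_demo_app)
    demo_environ = {'REMOTE_ADDR': '10.0.0.1', 'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}
    body = wrapped(demo_environ, lambda status, headers: None)
    print(body)  # [b'203.0.113.7']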
|
class Job(object):
def __init__(self, pipeline_connector, **kwargs):
self.kwargs = kwargs
self._pipeline_connector = pipeline_connector
def pipeline_context(self):
return self._pipeline_connector.pipeline_context()
def serialize(self):
from foundations_internal.serializer import serialize
return serialize(self)
@staticmethod
def deserialize(serialized_self):
from foundations_internal.serializer import deserialize
return deserialize(serialized_self)
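# Round-trip sketch (added comment; assumes a pipeline connector object and the
# foundations_internal serializer available in the surrounding project):
#
#     job = Job(pipeline_connector, batch_size=32)
#     blob = job.serialize()
#     restored = Job.deserialize(blob)
#     restored.kwargs  # {'batch_size': 32}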
|
"""
To run:
$ python lstm_frag.py --data_path=path/to/train.list
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0, "../src/")
from utils.vector_manager import VectorManager
import numpy as np
import tensorflow as tf
import subprocess
import inspect
import time
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string(
"tasks", "all",
"Tasks to be performed. Possible options are: all, train, test, valid")
flags.DEFINE_string(
    "word_to_id_path", "../models/eos/word2id_1000.pklz",
    "Path to the word-to-id mapping used to compute the vocabulary size.")
flags.DEFINE_string(
"embeddings", "../models/eos/idWordVec_",
"Embeddings path")
flags.DEFINE_string("data_path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
"Model output directory.")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
def get_vocab_size():
word_to_id = VectorManager.read_vector(FLAGS.word_to_id_path)
size = len(word_to_id)
print("Vocabulary size: %s" % size)
return size
def generate_arrays_from_list(name, files, embeddings, num_steps=35, batch_size=20, embedding_size=200):
debug = False
while 1:
for file_name in files:
print("Generating from file %s for %s" % (file_name, name))
raw_list = VectorManager.parse_into_list(open(file_name).read())
n_words = len(raw_list)
batch_len = n_words // batch_size
data = np.reshape(raw_list[0:batch_size*batch_len], [batch_size, batch_len])
for i in range(0, n_words - num_steps, 1):
x = data[0:batch_size, i * num_steps:(i + 1) * num_steps]
x = [[embeddings[int(elem)][2] for elem in l] for l in x]
y = data[0:batch_size, i * num_steps + 1:(i + 1) * num_steps + 1]
if len(x[0]) < num_steps or len(y[0]) < num_steps:
break
if debug:
print("Batch size %s\nNum steps %s\nEmbedding size %s" % (batch_size, num_steps, embedding_size
))
print("Len(x): %s\n Len(x[0] %s\n Len(x[0][0] %s" % (len(x), len(x[0]), len(x[0][0])))
print("Len(y): %s\n Len(y[0] %s" % (len(y), len(y[0])))
x = np.reshape(x, newshape=(batch_size, num_steps, embedding_size))
y = np.reshape(y, newshape=(batch_size, num_steps))
yield x, y
class WPModel(object):
"""Word Prediction model."""
def __init__(self, is_training, config):
self.config = config
batch_size = config.batch_size
num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
embedding_size = config.embedding_size
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
# if 'reuse' in inspect.getargspec(
# tf.contrib.rnn.BasicLSTMCell.__init__).args:
# return tf.contrib.rnn.BasicLSTMCell(
# size, forget_bias=0.0, state_is_tuple=True,
# reuse=tf.get_variable_scope().reuse)
# else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=config.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
self.inputs = tf.placeholder(dtype=data_type(), shape=(batch_size, num_steps, embedding_size))
self.targets = tf.placeholder(dtype=tf.int32, shape=(batch_size, num_steps))
if is_training and config.keep_prob < 1:
                # Dropout allows the same network to be used for training and testing
# See: https://stackoverflow.com/questions/34597316/why-input-is-scaled-in-tf-nn-dropout-in-tensorflow
# and: http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf
inputs = tf.nn.dropout(self.inputs, config.keep_prob)
else:
inputs = self.inputs
inputs = tf.unstack(inputs, num=num_steps, axis=1)
outputs, state = tf.contrib.rnn.static_rnn(
cell, inputs, initial_state=self._initial_state)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 1
num_steps = 20
hidden_size = 200
max_epoch = 2
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 126930
embedding_size = 200
epoch_size = 1
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 1
num_steps = 35
hidden_size = 512
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 126930
embedding_size = 200
epoch_size = 1
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 1
num_steps = 35
hidden_size = 1024
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 126930
embedding_size = 1000
epoch_size = 1
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 10
vocab_size = 126930
embedding_size = 200
epoch_size = 1
def run_epoch(session, generator, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
config = model.config
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
print("Epoch size starting training %s" % config.epoch_size)
for step in range(config.epoch_size):
x, y = next(generator)
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
feed_dict[model.inputs] = x
feed_dict[model.targets] = y
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += config.num_steps
# if verbose and step % 100 == 0:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / config.epoch_size, np.exp(costs / iters),
iters * config.batch_size / (time.time() - start_time)))
sys.stdout.flush()
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
elif FLAGS.model == "test":
return TestConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def get_epoch_size(files, config):
total = 0
for file in files:
file_words = subprocess.check_output(['wc', '-w', file])
number = file_words.split()[0]
words = int(number)
total += words - (words % (config.batch_size * config.num_steps))
print("Total words: %s, Batch size: %s, Num steps: %s" % (total, config.batch_size, config.num_steps))
sys.stdout.flush()
epoch_size = ((total // config.batch_size) - 1) // config.num_steps
return epoch_size
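# Worked example for the arithmetic above (added comment): with batch_size=20 and
# num_steps=35, a file set totalling 1,400,000 usable words (each file already truncated
# to a multiple of 20*35=700) gives epoch_size = ((1_400_000 // 20) - 1) // 35 = 1999
# training steps per epoch.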
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to wiki data directory list")
vocab_size = 126930
config = get_config()
config.vocab_size = vocab_size
valid_config = get_config()
    valid_config.vocab_size = vocab_size
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
eval_config.vocab_size = vocab_size
print("Embeddings path: {}".format(FLAGS.embeddings))
embeddings = VectorManager.read_vector(FLAGS.embeddings)
files = open(FLAGS.data_path).read().split()
training_list = files[0:int(0.8 * len(files))]
validation_list = files[int(0.8 * len(files)):int(0.9 * len(files))]
testing_list = files[int(0.9 * len(files)):len(files)]
config.epoch_size = get_epoch_size(training_list, config)
valid_config.epoch_size = get_epoch_size(validation_list, valid_config)
eval_config.epoch_size = get_epoch_size(testing_list, eval_config)
gen_train = generate_arrays_from_list("Train", training_list, embeddings, batch_size=config.batch_size,
embedding_size=config.embedding_size, num_steps=config.num_steps)
gen_valid = generate_arrays_from_list("Validation", validation_list, embeddings, batch_size=valid_config.batch_size,
embedding_size=valid_config.embedding_size, num_steps=valid_config.num_steps)
gen_test = generate_arrays_from_list("Test", testing_list, embeddings, batch_size=eval_config.batch_size,
embedding_size=eval_config.embedding_size, num_steps=eval_config.num_steps)
print("Epoch sizes\n * Training: %s\n * Validation: %s\n * Testing: %s" %
(config.epoch_size, valid_config.epoch_size, eval_config.epoch_size))
sys.stdout.flush()
with tf.Graph().as_default():
# Args: [minval, maxval]
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = WPModel(is_training=True, config=config)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = WPModel(is_training=False, config=valid_config)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mtest = WPModel(is_training=False, config=eval_config)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, generator=gen_train, model=m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, generator=gen_valid, model=mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, generator=gen_test, model=mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
if __name__ == "__main__":
tf.app.run()
|
from django.apps import AppConfig
class PofileDiffReduceConfig(AppConfig):
name = 'pofile_diff_reduce'
|
#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[0, False],
[1, True],
[2, True],
[15, False],
[16, True],
[65536, True],
[65538, False]
]
for item in tests:
try:
temp_result = is_power_of_two(item[0])
if temp_result == item[1]:
print('PASSED: is_power_of_two({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: is_power_of_two({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def is_power_of_two(val):
'''
Determines if val is a power of two
Input: val is a positive integer
Output: Boolean
'''
# Check input
if type(val) is not int:
raise TypeError('Input must be an integer')
# Trivial cases
if val <= 0:
return False
if val == 1:
return True
    # Repeatedly divide by 2 until val reaches 1
    while val > 1:
        if val % 2 == 1:
            return False
        val //= 2
return True
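def is_power_of_two_bitwise(val):
    '''
    Alternative sketch (added; not exercised by the test suite above): a positive
    integer is a power of two exactly when it has a single bit set, i.e. when
    val & (val - 1) == 0.
    '''
    if type(val) is not int:
        raise TypeError('Input must be an integer')
    return val > 0 and (val & (val - 1)) == 0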
if __name__ == '__main__':
main()
|
# Lesson 6
# Sometimes, we want to protect some of the data or methods in our classes
class RestroomStall:
def __init__(self):
self._occupants = 0 # protected variables discourage use
self.__bathroom_cam = 0 # private variables prevent use
def is_occupied(self):
return self._occupants != 0
def enter(self):
if not self.is_occupied():
print('You have entered the bathroom')
self._occupants += 1
else:
print("You can't enter the bathroom, it's occupied!")
def exit(self):
if self.is_occupied():
print("You're exiting the bathroom")
self._occupants -= 1
else:
print('ERROR! Attempted restroom exit with 0 occupants!')
stall = RestroomStall()
stall.enter()
stall.exit()
stall.enter()
stall.enter()
# If we really need to, we can access protected variables,
# but the class author is trying to tell you that modification is dangerous!
stall._occupants = 0
stall.enter()
# However, we cannot access private variables!
stall.__bathroom_cam
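# (Added note) The line above raises AttributeError: Python mangles names that start
# with two underscores to _ClassName__name, so the attribute is actually stored as
# stall._RestroomStall__bathroom_cam. Name mangling discourages, but does not truly
# prevent, outside access.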
|
import numpy as np
from numpy import load
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
train_split = 0.8
lead_time = 1
data_path = 'data/'
out_path = 'out/'
y = load(data_path + 'y.npy')
for lead_time in [1, 2, 3, 6, 12, 23]:
# Create a persistence model.
num_examples = len(y)
num_train = int(num_examples * train_split)
y_test = y[num_train+lead_time:]
pred = y[num_train:-lead_time]
# Test the model.
test_mse = mean_squared_error(y_test, pred, squared=True)
test_rmse = mean_squared_error(y_test, pred, squared=False)
print('Test MSE:', test_mse)
print("----------")
print()
fig, ax = plt.subplots(figsize=(12, 8))
plt.xlabel('Month')
plt.ylabel('SSTA')
plt.title('Persist_SSTAGraphDataset_leadtime_' + str(lead_time) + '_numsample_1679_trainsplit_0.8_MSE_' + str(round(test_mse, 4)), fontsize=12)
blue_patch = mpatches.Patch(color='blue', label='Predicted')
red_patch = mpatches.Patch(color='red', label='Observed')
ax.legend(handles=[blue_patch, red_patch])
month = np.arange(0, len(y_test), 1, dtype=int)
ax.plot(month, pred, 'o', color='blue')
ax.plot(month, y_test, 'o', color='red')
plt.savefig(out_path + 'plot_persist_SSTAGraphDataset_leadtime_' + str(lead_time) + '_numsample_1679_trainsplit_0.8.png')
print("Save the observed vs. predicted plot.")
print("--------------------")
print()
|
# -*- coding: utf-8 -*-
__all__ = [
"cublas",
]
from functools import reduce
from operator import mul
import numpy as np
# Local imports
from cublas_import import (cublas_axpy,
cublas_copy,
cublas_destroy,
cublas_gemm,
cublas_gemm_strided_batched,
cublas_init,
cublas_nrm2,
cublas_rf_batched,
cublas_ri_batched,
cublas_scal,
cublas_setstream,
cublasx_diag)
# Datatype identifier. cuBLAS currently supports these.
_blas_types = {np.dtype('f4'):0,
np.dtype('f8'):1,
np.dtype('c8'):2,
np.dtype('c16'):3,
np.dtype('f2'):4}
def check_vectors(x, y):
try:
if x.dtype != y.dtype:
raise TypeError('Dtype mismatch between vectors x and y in axpy.')
    except TypeError:
        exit('Could not complete request: dtype mismatch between x and y.')
class cublas(object):
def __init__(self, stream=None):
"""
Initialize a blas handle, and tie the stream to it if
one is passed in.
Parameters
----------
stream : c_void_p (cudastream_t*), optional
CUDA stream to initialize the blas handle to.
Attributes
----------
blas_handle : c_void_p (cublasHandle_t *)
Pointer reference to cuBLAS handle.
"""
self._stream = stream
self._blas_handle = cublas_init()
if self.stream is not None:
cublas_setstream(self.handle, self.stream)
def axpy(self, alpha, x, y, xinc=1, yinc=1, n=None):
"""
cuBLAS function ax plus y.
Parameters
----------
alpha : blas_types[dtype]
Scalar used for multiplication.
x : Device_Ptr object
Device pointer object with dev_ptr to vector x.
y : Device_Ptr object
Device pointer object with dev_ptr to vector x.
xinc : int, optional
Stride between consecutive elements of x.
yinc : int, optional
Stride between consecutive elements of y.
n : int, optional
Number of elements in the vectors x and y.
"""
if type(alpha) is not np.ndarray:
alpha = np.array(alpha, dtype=x.dtype)
n = n or len(x)
check_vectors(x,y)
cublas_axpy(self.handle, n, alpha,
x.ptr, xinc,
y.ptr, yinc,
_blas_types[x.dtype])
def copy(self, x, y, xinc=1, yinc=1, n=None):
"""
cuBLAS function copy vector x to y.
Parameters
----------
x : Device_Ptr object
Device pointer object with dev_ptr to vector x.
y : Device_Ptr object
Device pointer object with dev_ptr to vector y.
xinc : int, optional
Stride between consecutive elements of x.
yinc : int, optional
Stride between consecutive elements of y.
n : int
Number of elements in the vectors x and y.
Notes
-----
This is the equivalent of doing a cuda_memcpyd2d(...)
"""
check_vectors(x,y)
n = n or len(y)
cublas_copy(self.handle, n,
x.ptr, xinc,
y.ptr, yinc,
_blas_types[x.dtype])
def diag(self, x):
batch_size, n, m = x.shape
cublasx_diag(x.ptr,
m, n, batch_size,
_blas_types[x.dtype],
self.stream)
def gemm(self, a, b, c, alpha=1., beta=0., OPA='N', OPB='N', m3m=False):
"""
cuBLAS function gemm.
Parameters
----------
alpha : blas_types[dtype]
Scalar used for multiplication.
a : Device_Ptr object
Device pointer object with dev_ptr to input matrix a.
b : Device_Ptr object
Device pointer object with dev_ptr to input matrix b.
c : Device_Ptr object
Device pointer object with dev_ptr to output matrix c.
        beta : blas_types[dtype], optional
            Scalar applied to the existing contents of c before the product
            is accumulated, i.e. c = alpha*op(a)*op(b) + beta*c. A value of
            zero overwrites c.
OPA : str, optional
CUBLAS_OP_N ('N') or CUBLAS_OP_T ('T') or CUBLAS_OP_C ('C')
OPB : str, optional
CUBLAS_OP_N ('N') or CUBLAS_OP_T ('T') or CUBLAS_OP_C ('C')
m3m : bool, optional
Use the Gaussian reduction optimization for complex
type for a small speed boost. Only supported for complex float
at this time.
Notes
-----
Dealing with cuBLAS FORTRAN style indexing:
https://peterwittek.com/cublas-matrix-c-style.html
"""
check_vectors(a,b)
check_vectors(b,c)
m, n = c.shape[-2:]
k = a.shape[-1] if OPA == 'N' else a.shape[-2]
if type(alpha) is not np.ndarray:
alpha = np.array([alpha], dtype=a.dtype)
if type(beta) is not np.ndarray:
beta = np.array([beta], dtype=a.dtype)
ldc = n
ldb = n if OPB == 'N' else k
lda = k if OPA == 'N' else m
cublas_gemm(self.handle,
{'N':0, 'T':1, 'C':2}[OPB],
{'N':0, 'T':1, 'C':2}[OPA],
n, m, k,
alpha,
b.ptr, ldb,
a.ptr, lda,
beta,
c.ptr, ldc,
_blas_types[a.dtype],
m3m)
def gemm_strided_batched(self, a, b, c, alpha=1., beta=0.,
strideA=None, strideB=None, strideC=None,
OPA='N', OPB='N', m3m=False):
"""
cuBLAS function gemm.
Parameters
----------
alpha : blas_types[dtype]
Scalar used for multiplication.
a : Device_Ptr object
Device pointer object with dev_ptr to input matrix a.
b : Device_Ptr object
Device pointer object with dev_ptr to input matrix b.
c : Device_Ptr object
Device pointer object with dev_ptr to output matrix c.
        beta : blas_types[dtype], optional
            Scalar applied to the existing contents of c before the product
            is accumulated, i.e. c = alpha*op(a)*op(b) + beta*c. A value of
            zero overwrites c.
OPA : str, optional
CUBLAS_OP_N ('N') or CUBLAS_OP_T ('T') or CUBLAS_OP_C ('C')
OPB : str, optional
CUBLAS_OP_N ('N') or CUBLAS_OP_T ('T') or CUBLAS_OP_C ('C')
m3m : bool, optional
Use the Gaussian reduction optimization for complex
type for a small speed boost. Only supported for complex float
at this time.
Notes
-----
Dealing with cuBLAS FORTRAN style indexing:
https://peterwittek.com/cublas-matrix-c-style.html
"""
check_vectors(a,b)
check_vectors(b,c)
batch_size = c.shape[0]
m, n = c.shape[-2:]
k = a.shape[-1] if OPA == 'N' else a.shape[-2]
if type(alpha) is not np.ndarray:
alpha = np.array(alpha, dtype=a.dtype)
if type(beta) is not np.ndarray:
beta = np.array(beta, dtype=a.dtype)
ldc = n
ldb = n if OPB == 'N' else k
lda = k if OPA == 'N' else m
strideA = strideA or reduce(mul,a.shape[-2:]) if len(a) > 1 else 0
strideB = strideB or reduce(mul,b.shape[-2:]) if len(b) > 1 else 0
strideC = strideC or reduce(mul,c.shape[-2:]) if len(c) > 1 else 0
cublas_gemm_strided_batched(
self.handle,
{'N':0, 'T':1, 'C':2}[OPB],
{'N':0, 'T':1, 'C':2}[OPA],
n, m, k,
alpha,
b.ptr, ldb, strideB,
a.ptr, lda, strideA,
beta,
c.ptr, ldc, strideC,
batch_size,
_blas_types[a.dtype],
m3m)
def nrm2(self, x, xinc=1, n=None):
"""
Computes the Euclidean norm of the vector x, and stores
the result on host array y.
Parameters
----------
x : Device_Ptr object
Device pointer object with dev_ptr to vector x.
xinc : int, optional
Stride between consecutive elements of x.
n : int, optional
Number of elements in the vectors x
Returns
-------
y : blas_types[dtype]
Euclidean norm of the vector x.
"""
y = np.empty(1, dtype=x.dtype)
n = n or len(x)
cublas_nrm2(self.handle, n,
x.ptr, xinc,
y,
_blas_types[x.dtype])
return y[0]
def rf_batched(self, x, p, i, n, batch_size):
cublas_rf_batched(self.handle, n,
x.ptr,
p.ptr,
i.ptr,
batch_size,
_blas_types[x.dtype])
def ri_batched(self, x, y, p, i, n, batch_size):
cublas_ri_batched(self.handle, n,
x.ptr,
p.ptr,
y.ptr,
i.ptr,
batch_size,
_blas_types[x.dtype])
def scal(self, alpha, x, xinc=1, n=None):
"""
Scales the vector x by the scalar alpha and overwrites itself
with the result.
Parameters
----------
alpha : blas_types[dtype]
Scalar used for multiplication.
x : Device_Ptr object
Device pointer object with dev_ptr to vector x.
xinc : int, optional
Stride between consecutive elements of x.
n : int, optional
Number of elements in the vectors x
"""
if type(alpha) is not np.ndarray:
alpha = np.array(alpha, dtype=x.dtype)
n = n or len(x)
cublas_scal(self.handle, n, alpha,
x.ptr, xinc,
_blas_types[x.dtype])
@property
def handle(self):
return self._blas_handle
@property
def stream(self):
return self._stream
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
cublas_destroy(self.handle)
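# Added sketch: the gemm wrappers above pass b before a and swap m/n because cuBLAS
# expects column-major storage while NumPy arrays are row-major. The identity they rely
# on, (A @ B) == (B.T @ A.T).T, can be checked with plain NumPy (no GPU required):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    A = rng.standard_normal((3, 4)).astype('f4')
    B = rng.standard_normal((4, 5)).astype('f4')
    assert np.allclose(A @ B, (B.T @ A.T).T)
    print('row-major/column-major swap identity holds')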
|
from citrination_client.search.core.query.filter import Filter
from citrination_client.search.pif.query.chemical.chemical_field_query import ChemicalFieldQuery
from citrination_client.search.pif.query.chemical.composition_query import CompositionQuery
from citrination_client.search.pif.query.core.base_object_query import BaseObjectQuery
from citrination_client.search.pif.query.core.classification_query import ClassificationQuery
from citrination_client.search.pif.query.core.field_query import FieldQuery
from citrination_client.search.pif.query.core.id_query import IdQuery
from citrination_client.search.pif.query.core.process_step_query import ProcessStepQuery
from citrination_client.search.pif.query.core.property_query import PropertyQuery
from citrination_client.search.pif.query.core.quantity_query import QuantityQuery
from citrination_client.search.pif.query.core.reference_query import ReferenceQuery
from citrination_client.search.pif.query.core.source_query import SourceQuery
class PifSystemQuery(BaseObjectQuery):
"""
Class to store information about a PIF query.
"""
def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,
extract_when_missing=None, tags=None, length=None, offset=None, uid=None, updated_at=None,
names=None, ids=None, classifications=None, source=None, quantity=None, chemical_formula=None,
composition=None, properties=None, preparation=None, references=None, sub_systems=None,
query=None, **kwargs):
"""
Constructor.
:param logic: Logic for this filter. Must be equal to one of "MUST", "MUST_NOT", "SHOULD", or "OPTIONAL".
:param weight: Weight of the query.
:param simple: String with the simple search to run against all fields.
:param simple_weight: Dictionary of relative paths to their weights for simple queries.
:param extract_as: String with the alias to save this field under.
:param extract_all: Boolean setting whether all values in an array should be extracted.
:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).
:param tags: One or more :class:`FieldQuery` operations against the tags field.
:param length: One or more :class:`FieldQuery` operations against the length field.
:param offset: One or more :class:`FieldQuery` operations against the offset field.
:param uid: One or more :class:`Filter` objects with the filters against the uid field.
:param updated_at: One or more :class:`Filter` objects with filters against the time that the PIF record was last updated.
:param names: One or more :class:`FieldQuery` objects with queries against the names field.
:param ids: One or more :class:`IdQuery` objects with queries against the ids field.
:param classifications: One or more :class:`ClassificationQuery` objects with queries against the classifications field.
:param source: One or more :class:`SourceQuery` objects with queries against the source field.
:param quantity: One or more :class:`QuantityQuery` objects with queries against the quantity field.
:param chemical_formula: One or more :class:`ChemicalFieldQuery` objects with queries against the chemicalFormula field.
:param composition: One or more :class:`CompositionQuery` objects with queries against the composition field.
:param properties: One or more :class:`PropertyQuery` objects with queries against the properties field.
:param preparation: One or more :class:`ProcessStepQuery` objects with queries against the preparation field.
:param references: One or more :class:`ReferenceQuery` objects with queries against the references field.
:param sub_systems: One or more :class:`PifSystemQuery` objects with queries against the subSystems field.
:param query: One or more :class:`PifSystemQuery` objects with nested queries.
"""
super(PifSystemQuery, self).__init__(
logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,
extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,
offset=offset, **kwargs)
self._uid = None
self.uid = uid
self._updated_at = None
self.updated_at = updated_at
self._names = None
self.names = names
self._ids = None
self.ids = ids
self._classifications = None
self.classifications = classifications
self._source = None
self.source = source
self._quantity = None
self.quantity = quantity
self._chemical_formula = None
self.chemical_formula = chemical_formula
self._composition = None
self.composition = composition
self._properties = None
self.properties = properties
self._preparation = None
self.preparation = preparation
self._references = None
self.references = references
self._sub_systems = None
self.sub_systems = sub_systems
self._query = None
self.query = query
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, uid):
self._uid = self._get_object(Filter, uid)
@uid.deleter
def uid(self):
self._uid = None
@property
def updated_at(self):
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
self._updated_at = self._get_object(Filter, updated_at)
@updated_at.deleter
def updated_at(self):
self._updated_at = None
@property
def names(self):
return self._names
@names.setter
def names(self, names):
self._names = self._get_object(FieldQuery, names)
@names.deleter
def names(self):
self._names = None
@property
def ids(self):
return self._ids
@ids.setter
def ids(self, ids):
self._ids = self._get_object(IdQuery, ids)
@ids.deleter
def ids(self):
self._ids = None
@property
def classifications(self):
return self._classifications
@classifications.setter
def classifications(self, classifications):
self._classifications = self._get_object(ClassificationQuery, classifications)
@classifications.deleter
def classifications(self):
self._classifications = None
@property
def source(self):
return self._source
@source.setter
def source(self, source):
self._source = self._get_object(SourceQuery, source)
@source.deleter
def source(self):
self._source = None
@property
def quantity(self):
return self._quantity
@quantity.setter
def quantity(self, quantity):
self._quantity = self._get_object(QuantityQuery, quantity)
@quantity.deleter
def quantity(self):
self._quantity = None
@property
def chemical_formula(self):
return self._chemical_formula
@chemical_formula.setter
def chemical_formula(self, chemical_formula):
self._chemical_formula = self._get_object(ChemicalFieldQuery, chemical_formula)
@chemical_formula.deleter
def chemical_formula(self):
self._chemical_formula = None
@property
def composition(self):
return self._composition
@composition.setter
def composition(self, composition):
self._composition = self._get_object(CompositionQuery, composition)
@composition.deleter
def composition(self):
self._composition = None
@property
def properties(self):
return self._properties
@properties.setter
def properties(self, properties):
self._properties = self._get_object(PropertyQuery, properties)
@properties.deleter
def properties(self):
self._properties = None
@property
def preparation(self):
return self._preparation
@preparation.setter
def preparation(self, preparation):
self._preparation = self._get_object(ProcessStepQuery, preparation)
@preparation.deleter
def preparation(self):
self._preparation = None
@property
def references(self):
return self._references
@references.setter
def references(self, references):
self._references = self._get_object(ReferenceQuery, references)
@references.deleter
def references(self):
self._references = None
@property
def sub_systems(self):
return self._sub_systems
@sub_systems.setter
def sub_systems(self, sub_systems):
self._sub_systems = self._get_object(PifSystemQuery, sub_systems)
@sub_systems.deleter
def sub_systems(self):
self._sub_systems = None
@property
def query(self):
return self._query
@query.setter
def query(self, query):
self._query = self._get_object(PifSystemQuery, query)
@query.deleter
def query(self):
self._query = None
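# Usage sketch (added comment; the parameter values and nested query arguments are
# illustrative assumptions, not taken from this file):
#
#     query = PifSystemQuery(
#         uid=Filter(equal='abc123'),
#         chemical_formula=ChemicalFieldQuery(extract_as='formula'),
#         properties=PropertyQuery(name=FieldQuery(extract_as='property_name')),
#     )
#
# Each keyword is converted by the corresponding setter above into the query object
# documented in the constructor docstring.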
|
from twilio.rest import TwilioRestClient
import os, sys
TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')
# Helpful link content
FLASK = """
Some helpful links:
- Flask: http://bit.ly/1eU7R5M
- twilio-python: http://bit.ly/1pKlW3E
- Ngrok: https://ngrok.com/"""
TESTING = """
Interested in testing?
- py.test http://bit.ly/1UdIVR1
- coverage.py http://bit.ly/1MERWLa
- Unit testing web apps http://bit.ly/1UOSyEJ"""
CI = """
Continuous Integration (CI) and Travis resources:
- CI overview http://bit.ly/28LwM2A
- Travis http://bit.ly/28MY5Nt
- Coveralls https://coveralls.io/"""
HEROKU = """
Deployment and Heroku:
- Deploying http://bit.ly/28Ob0Nr
- Heroku http://bit.ly/28Ni1h9"""
CONTACT = """
Thanks for coming out tonight! Helpful links:
- Repo http://bit.ly/28LJgrM
- My Twitter https://twitter.com/andrewtorkbaker
- My email abaker@twilio.com"""
# Instantiate our Twilio client
client = TwilioRestClient()
def get_audience_numbers():
"""Retrieves the phone numbers of audience members who have texted in"""
# Get all the messages we've received on our Twilio number
messages = client.messages.list(To=TWILIO_NUMBER)
# Extract the 'from' number from each message we have received
audience_numbers = set()
for message in messages:
audience_numbers.add(message.from_)
return audience_numbers
def send_helpful_links(section):
"""Sends some helpful links to the audience for a section in the presentation"""
# Get the audience numbers
numbers = get_audience_numbers()
# Yes, this is dangerous and foolish
body = eval(section.upper())
for number in numbers:
client.messages.create(
to=number,
from_=TWILIO_NUMBER,
body=body,
)
print('Sent helpful links to {} people'.format(len(numbers)))
if __name__ == '__main__':
send_helpful_links(sys.argv[1])
|
# Copyright 2022 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the condition transform.
"""
from functools import wraps
from typing import Type
from pennylane.measurements import MeasurementValue
from pennylane.operation import AnyWires, Operation
from pennylane.transforms import make_tape
class ConditionalTransformError(ValueError):
"""Error for using qml.cond incorrectly"""
class Conditional(Operation):
"""A Conditional Operation.
    Unless you are a PennyLane plugin developer, **you should NOT directly use this class**,
instead, use the :func:`qml.cond <.cond>` function.
The ``Conditional`` class is a container class that defines an operation
    that should be applied relative to a single measurement value.
Support for executing ``Conditional`` operations is device-dependent. If a
device doesn't support mid-circuit measurements natively, then the QNode
will apply the :func:`defer_measurements` transform.
Args:
expr (MeasurementValue): the measurement outcome value to consider
then_op (Operation): the PennyLane operation to apply conditionally
do_queue (bool): indicates whether the operator should be
recorded when created in a tape context
id (str): custom label given to an operator instance,
can be useful for some applications where the instance has to be identified
"""
num_wires = AnyWires
def __init__(
self,
expr: MeasurementValue[bool],
then_op: Type[Operation],
do_queue=True,
id=None,
):
self.meas_val = expr
self.then_op = then_op
super().__init__(wires=then_op.wires, do_queue=do_queue, id=id)
def cond(condition, true_fn, false_fn=None):
"""Condition a quantum operation on the results of mid-circuit qubit measurements.
Support for using :func:`~.cond` is device-dependent. If a device doesn't
support mid-circuit measurements natively, then the QNode will apply the
:func:`defer_measurements` transform.
Args:
condition (.MeasurementValue[bool]): a conditional expression involving a mid-circuit
measurement value (see :func:`.pennylane.measure`)
        true_fn (callable): The quantum function or PennyLane operation to
            apply if ``condition`` is ``True``
        false_fn (callable): The quantum function or PennyLane operation to
            apply if ``condition`` is ``False``
Returns:
function: A new function that applies the conditional equivalent of ``true_fn``. The returned
function takes the same input arguments as ``true_fn``.
**Example**
.. code-block:: python3
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def qnode(x, y):
qml.Hadamard(0)
m_0 = qml.measure(0)
qml.cond(m_0, qml.RY)(x, wires=1)
qml.Hadamard(2)
qml.RY(-np.pi/2, wires=[2])
m_1 = qml.measure(2)
qml.cond(m_1 == 0, qml.RX)(y, wires=1)
return qml.expval(qml.PauliZ(1))
.. code-block :: pycon
>>> first_par = np.array(0.3, requires_grad=True)
>>> sec_par = np.array(1.23, requires_grad=True)
>>> qnode(first_par, sec_par)
tensor(0.32677361, requires_grad=True)
.. note::
If the first argument of ``cond`` is a measurement value (e.g., ``m_0``
in ``qml.cond(m_0, qml.RY)``), then ``m_0 == 1`` is considered
internally.
.. details::
:title: Usage Details
**Conditional quantum functions**
The ``cond`` transform allows conditioning quantum functions too:
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
def qfunc(par, wires):
qml.Hadamard(wires[0])
qml.RY(par, wires[0])
@qml.qnode(dev)
def qnode(x):
qml.Hadamard(0)
m_0 = qml.measure(0)
qml.cond(m_0, qfunc)(x, wires=[1])
return qml.expval(qml.PauliZ(1))
.. code-block :: pycon
>>> par = np.array(0.3, requires_grad=True)
>>> qnode(par)
tensor(0.3522399, requires_grad=True)
**Passing two quantum functions**
In the qubit model, single-qubit measurements may result in one of two
outcomes. Such measurement outcomes may then be used to create
conditional expressions.
According to the truth value of the conditional expression passed to
``cond``, the transform can apply a quantum function in both the
``True`` and ``False`` case:
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
def qfunc1(x, wires):
qml.Hadamard(wires[0])
qml.RY(x, wires[0])
def qfunc2(x, wires):
qml.Hadamard(wires[0])
qml.RZ(x, wires[0])
@qml.qnode(dev)
def qnode1(x):
qml.Hadamard(0)
m_0 = qml.measure(0)
qml.cond(m_0, qfunc1, qfunc2)(x, wires=[1])
return qml.expval(qml.PauliZ(1))
.. code-block :: pycon
>>> par = np.array(0.3, requires_grad=True)
>>> qnode1(par)
tensor(-0.1477601, requires_grad=True)
The previous QNode is equivalent to using ``cond`` twice, inverting the
conditional expression in the second case using the ``~`` unary
operator:
.. code-block:: python3
@qml.qnode(dev)
def qnode2(x):
qml.Hadamard(0)
m_0 = qml.measure(0)
qml.cond(m_0, qfunc1)(x, wires=[1])
qml.cond(~m_0, qfunc2)(x, wires=[1])
return qml.expval(qml.PauliZ(1))
.. code-block :: pycon
>>> qnode2(par)
tensor(-0.1477601, requires_grad=True)
**Quantum functions with different signatures**
It may be that the two quantum functions passed to ``qml.cond`` have
different signatures. In such a case, ``lambda`` functions taking no
arguments can be used with Python closure:
.. code-block:: python3
dev = qml.device("default.qubit", wires=2)
def qfunc1(x, wire):
qml.Hadamard(wire)
qml.RY(x, wire)
def qfunc2(x, y, z, wire):
qml.Hadamard(wire)
qml.Rot(x, y, z, wire)
@qml.qnode(dev)
def qnode(a, x, y, z):
qml.Hadamard(0)
m_0 = qml.measure(0)
qml.cond(m_0, lambda: qfunc1(a, wire=1), lambda: qfunc2(x, y, z, wire=1))()
return qml.expval(qml.PauliZ(1))
.. code-block :: pycon
>>> par = np.array(0.3, requires_grad=True)
>>> x = np.array(1.2, requires_grad=True)
>>> y = np.array(1.1, requires_grad=True)
>>> z = np.array(0.3, requires_grad=True)
>>> qnode(par, x, y, z)
tensor(-0.30922805, requires_grad=True)
"""
if callable(true_fn):
# We assume that the callable is an operation or a quantum function
with_meas_err = (
"Only quantum functions that contain no measurements can be applied conditionally."
)
@wraps(true_fn)
def wrapper(*args, **kwargs):
# We assume that the callable is a quantum function
# 1. Apply true_fn conditionally
tape = make_tape(true_fn)(*args, **kwargs)
if tape.measurements:
raise ConditionalTransformError(with_meas_err)
for op in tape.operations:
Conditional(condition, op)
if false_fn is not None:
# 2. Apply false_fn conditionally
else_tape = make_tape(false_fn)(*args, **kwargs)
if else_tape.measurements:
raise ConditionalTransformError(with_meas_err)
inverted_condition = ~condition
for op in else_tape.operations:
Conditional(inverted_condition, op)
else:
raise ConditionalTransformError(
"Only operations and quantum functions with no measurements can be applied conditionally."
)
return wrapper
|
"""
Calculate the subcomponents that will later be combined into the moving foreground.
"""
from baboon_tracking.stages.motion_detector.generate_mask_subcomponents.foreground.foreground import (
Foreground,
)
from baboon_tracking.stages.motion_detector.generate_mask_subcomponents.generate_history_of_dissimilarity import (
GenerateHistoryOfDissimilarity,
)
from pipeline import Parallel
class GenerateMaskSubcomponents(Parallel):
"""
Calculate the subcomponents that will later be combined into the moving foreground.
"""
def __init__(self) -> None:
Parallel.__init__(
self,
"GenerateMaskSubcomponents",
GenerateHistoryOfDissimilarity,
Foreground,
)
|
#######################################################################################################################################################
#######################################################################Imports#########################################################################
#######################################################################################################################################################
#from itertools import product # forms cartesian products
#from tqdm import tqdm_notebook as tqdm
#import pickle
import numpy as np
import pandas as pd
import scipy as sp
from functools import reduce
from more_itertools import random_product
#import math
from joblib import Parallel, delayed
from collections.abc import Iterable
#from scipy.integrate import quad
from sklearn.model_selection import train_test_split
#from sklearn.metrics import accuracy_score, log_loss, roc_auc_score, f1_score, mean_absolute_error, r2_score
from similaritymeasures import frechet_dist, area_between_two_curves, dtw
import tensorflow as tf
import random
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
#udf import
#from utilities.LambdaNet import *
from utilities.metrics import *
from utilities.utility_functions import *
import copy
#######################################################################################################################################################
#############################################################Setting relevant parameters from current config###########################################
#######################################################################################################################################################
def initialize_LambdaNet_config_from_curent_notebook(config):
    try:
        globals().update(config['data'])
    except KeyError as err:
        print(err)
    try:
        globals().update(config['lambda_net'])
    except KeyError as err:
        print(err)
    try:
        globals().update(config['i_net'])
    except KeyError as err:
        print(err)
    try:
        globals().update(config['evaluation'])
    except KeyError as err:
        print(err)
    try:
        globals().update(config['computation'])
    except KeyError as err:
        print(err)
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
if int(tf.__version__[0]) >= 2:
tf.random.set_seed(RANDOM_SEED)
else:
tf.set_random_seed(RANDOM_SEED)
global list_of_monomial_identifiers
from utilities.utility_functions import flatten, rec_gen, gen_monomial_identifier_list
list_of_monomial_identifiers_extended = []
if laurent:
variable_sets = [list(flatten([[_d for _d in range(d+1)], [-_d for _d in range(1, neg_d+1)]])) for _ in range(n)]
list_of_monomial_identifiers_extended = rec_gen(variable_sets)
if len(list_of_monomial_identifiers_extended) < 500:
print(list_of_monomial_identifiers_extended)
list_of_monomial_identifiers = []
for monomial_identifier in tqdm(list_of_monomial_identifiers_extended):
if np.sum(monomial_identifier) <= d:
                if monomial_vars is None or len(list(filter(lambda x: x != 0, monomial_identifier))) <= monomial_vars:
list_of_monomial_identifiers.append(monomial_identifier)
else:
variable_list = ['x'+ str(i) for i in range(n)]
list_of_monomial_identifiers = gen_monomial_identifier_list(variable_list, d, n)
#######################################################################################################################################################
##################################################################Lambda Net Wrapper###################################################################
#######################################################################################################################################################
class LambdaNetDataset():
lambda_net_list = None
weight_list = None
train_settings_list = None
index_list = None
target_polynomial_list = None
lstsq_lambda_pred_polynomial_list = None
lstsq_target_polynomial_list = None
X_test_data_list = None
y_test_data_list = None
def __init__(self, lambda_net_list):
self.lambda_net_list = lambda_net_list
self.weight_list = [lambda_net.weights for lambda_net in lambda_net_list]
self.train_settings_list = {}
for key in lambda_net_list[0].train_settings.keys():
self.train_settings_list[key] = []
for lambda_net in lambda_net_list:
for key in lambda_net.train_settings.keys():
self.train_settings_list[key].append(lambda_net.train_settings[key])
self.index_list = [lambda_net.index for lambda_net in lambda_net_list]
self.target_polynomial_list = [lambda_net.target_polynomial for lambda_net in lambda_net_list]
self.lstsq_lambda_pred_polynomial_list = [lambda_net.lstsq_lambda_pred_polynomial for lambda_net in lambda_net_list]
self.lstsq_target_polynomial_list = [lambda_net.lstsq_target_polynomial for lambda_net in lambda_net_list]
self.X_test_data_list = [lambda_net.X_test_data for lambda_net in lambda_net_list]
self.y_test_data_list = [lambda_net.y_test_data for lambda_net in lambda_net_list]
def __repr__(self):
return str(self.as_pandas().head())
def __str__(self):
return str(self.as_pandas().head())
def __len__(self):
return len(self.lambda_net_list)
def make_prediction_on_dataset(self, evaluation_dataset):
assert evaluation_dataset.shape[1] == n
lambda_network_preds_list = []
for weights in self.weight_list:
lambda_network_preds = weights_to_pred(weights, evaluation_dataset)
lambda_network_preds_list.append(lambda_network_preds)
return np.array(lambda_network_preds_list)
def make_prediction_on_test_data(self):
lambda_network_preds_list = []
for lambda_net in self.lambda_net_list:
lambda_network_preds = lambda_net.make_prediction_on_test_data()
lambda_network_preds_list.append(lambda_network_preds)
return np.array(lambda_network_preds_list)
def return_target_poly_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        target_poly_fvs_list = parallel_fv_calculation_from_polynomial(self.target_polynomial_list, [evaluation_dataset for _ in range(len(self.target_polynomial_list))], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(target_poly_fvs_list)
def return_target_poly_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        target_poly_fvs_list = parallel_fv_calculation_from_polynomial(self.target_polynomial_list, self.X_test_data_list, force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(target_poly_fvs_list)
def return_lstsq_lambda_pred_polynomial_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        lstsq_lambda_pred_polynomial_fvs_list = parallel_fv_calculation_from_polynomial(self.lstsq_lambda_pred_polynomial_list, [evaluation_dataset for _ in range(len(self.target_polynomial_list))], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(lstsq_lambda_pred_polynomial_fvs_list)
def return_lstsq_lambda_pred_polynomial_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        lstsq_lambda_pred_polynomial_fvs_list = parallel_fv_calculation_from_polynomial(self.lstsq_lambda_pred_polynomial_list, self.X_test_data_list, force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(lstsq_lambda_pred_polynomial_fvs_list)
def return_lstsq_target_polynomial_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        lstsq_target_polynomial_fvs_list = parallel_fv_calculation_from_polynomial(self.lstsq_target_polynomial_list, [evaluation_dataset for _ in range(len(self.target_polynomial_list))], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(lstsq_target_polynomial_fvs_list)
def return_lstsq_target_polynomial_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        lstsq_target_polynomial_fvs_list = parallel_fv_calculation_from_polynomial(self.lstsq_target_polynomial_list, self.X_test_data_list, force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return np.array(lstsq_target_polynomial_fvs_list)
def as_pandas(self):
lambda_dataframe = pd.DataFrame(data=[lambda_net.as_array() for lambda_net in self.lambda_net_list],
columns=self.lambda_net_list[0].return_column_names(),
index=[lambda_net.index for lambda_net in self.lambda_net_list])
lambda_dataframe['seed'] = lambda_dataframe['seed'].astype(int)
return lambda_dataframe
def get_lambda_nets_by_seed(self, seed_list):
lambda_nets_by_seed = []
for lambda_net in self.lambda_net_list:
if lambda_net.train_settings['seed'] in seed_list:
lambda_nets_by_seed.append(lambda_net)
return LambdaNetDataset(lambda_nets_by_seed)
def get_lambda_nets_by_lambda_index(self, lambda_index_list):
lambda_nets_by_lambda_index = []
for lambda_net in self.lambda_net_list:
if lambda_net.index in lambda_index_list:
lambda_nets_by_lambda_index.append(lambda_net)
return LambdaNetDataset(lambda_nets_by_lambda_index)
def get_lambda_net_by_lambda_index(self, lambda_index):
for lambda_net in self.lambda_net_list:
if lambda_net.index in lambda_index:
return lambda_net
return None
def sample(self, size, seed=42):
assert isinstance(size, int) or isinstance(size, float), 'Wrong sample size specified'
random.seed(seed)
sample_lambda_net_list = None
if isinstance(size, int):
sample_lambda_net_list = random.sample(self.lambda_net_list, size)
elif isinstance(size, float):
size = int(np.round(len(self.lambda_net_list)*size))
sample_lambda_net_list = random.sample(self.lambda_net_list, size)
return LambdaNetDataset(sample_lambda_net_list)
class LambdaNet():
weights = None
model = None
train_settings = None
index = None
target_polynomial = None
lstsq_lambda_pred_polynomial = None
lstsq_target_polynomial = None
X_test_data = None
y_test_data = None
def __init__(self, line_weights, line_X_data, line_y_data):
        assert isinstance(line_weights, np.ndarray), 'line is not an array: ' + str(line_weights)
from utilities.utility_functions import shaped_network_parameters_to_array, normal_neural_net, shape_flat_network_parameters, generate_base_model
self.index = int(line_weights[0])
try:
self.train_settings = {'seed': int(line_weights[1])}
except ValueError:
self.train_settings = {'seed': -1}
self.target_polynomial = line_weights[range(2, sparsity+2)].astype(float)
self.lstsq_lambda_pred_polynomial = line_weights[range(sparsity+2, sparsity*2+2)].astype(float)
self.lstsq_target_polynomial = line_weights[range(sparsity*2+2, sparsity*3+2)].astype(float)
assert self.target_polynomial.shape[0] == sparsity, 'target polynomial has incorrect shape ' + str(self.target_polynomial.shape[0]) + ' but should be ' + str(sparsity)
assert self.lstsq_lambda_pred_polynomial.shape[0] == sparsity, 'lstsq lambda pred polynomial has incorrect shape ' + str(self.lstsq_lambda_pred_polynomial.shape[0]) + ' but should be ' + str(sparsity)
assert self.lstsq_target_polynomial.shape[0] == sparsity, 'lstsq target polynomial has incorrect shape ' + str(self.lstsq_target_polynomial.shape[0]) + ' but should be ' + str(sparsity)
self.weights = line_weights[sparsity*3+2:].astype(float)
assert self.weights.shape[0] == number_of_lambda_weights, 'weights have incorrect shape ' + str(self.weights.shape[0]) + ' but should be ' + str(number_of_lambda_weights)
line_X_data = line_X_data[1:]
line_y_data = line_y_data[1:]
self.X_test_data = np.transpose(np.array([line_X_data[i::n] for i in range(n)]))
self.y_test_data = line_y_data.reshape(-1,1)
if normalize_lambda_nets:
if self.index == 1:
print('NORMALIZE PRE')
print(self.weights)
print(weights_to_pred(self.weights, self.X_test_data[:5]))
self.weights = shaped_network_parameters_to_array(normal_neural_net(shape_flat_network_parameters(copy.deepcopy(self.weights), generate_base_model().get_weights())))
if self.index == 1:
print('NORMALIZE AFTER')
print(self.weights)
print(weights_to_pred(self.weights, self.X_test_data[:5]))
def __repr__(self):
return str(self.weights)
def __str__(self):
return str(self.weights)
def make_prediction_on_dataset(self, evaluation_dataset):
assert evaluation_dataset.shape[1] == n
lambda_network_preds = weights_to_pred(self.weights, evaluation_dataset)
return lambda_network_preds
def make_prediction_on_test_data(self):
lambda_network_preds = weights_to_pred(self.weights, self.X_test_data)
return lambda_network_preds
def return_target_poly_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        target_poly_fvs = parallel_fv_calculation_from_polynomial([self.target_polynomial], [evaluation_dataset], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return target_poly_fvs
def return_target_poly_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        target_poly_fvs = parallel_fv_calculation_from_polynomial([self.target_polynomial], [self.X_test_data], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return target_poly_fvs
def return_lstsq_lambda_pred_polynomial_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        lstsq_lambda_pred_polynomial_fvs = parallel_fv_calculation_from_polynomial([self.lstsq_lambda_pred_polynomial], [evaluation_dataset], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return lstsq_lambda_pred_polynomial_fvs
def return_lstsq_lambda_pred_polynomial_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        lstsq_lambda_pred_polynomial_fvs = parallel_fv_calculation_from_polynomial([self.lstsq_lambda_pred_polynomial], [self.X_test_data], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return lstsq_lambda_pred_polynomial_fvs
def return_lstsq_target_polynomial_fvs_on_dataset(self, evaluation_dataset, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
assert evaluation_dataset.shape[1] == n, 'evaluation dataset has wrong shape ' + str(evaluation_dataset.shape) + ' but required (x, ' + str(n) + ')'
        lstsq_target_polynomial_fvs = parallel_fv_calculation_from_polynomial([self.lstsq_target_polynomial], [evaluation_dataset], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return lstsq_target_polynomial_fvs
def return_lstsq_target_polynomial_fvs_on_test_data(self, n_jobs_parallel_fv=10, backend='threading'):
from utilities.utility_functions import parallel_fv_calculation_from_polynomial
        lstsq_target_polynomial_fvs = parallel_fv_calculation_from_polynomial([self.lstsq_target_polynomial], [self.X_test_data], force_complete_poly_representation=True, n_jobs_parallel_fv=n_jobs_parallel_fv, backend=backend)
return lstsq_target_polynomial_fvs
def as_pandas(self):
        columns = self.return_column_names()
        data = self.as_array()
df = pd.DataFrame(data=data, columns=columns, index=[self.index])
df['seed'] = df['seed'].astype(int)
return df
def as_array(self):
data = np.hstack([self.train_settings['seed'], self.target_polynomial, self.lstsq_lambda_pred_polynomial, self.lstsq_target_polynomial, self.weights])
return data
def return_column_names(self):
from utilities.utility_functions import flatten
list_of_monomial_identifiers_string = [''.join(str(e) for e in monomial_identifier) for monomial_identifier in list_of_monomial_identifiers] if n > 1 else [str(monomial_identifier[0]) for monomial_identifier in list_of_monomial_identifiers]
target_polynomial_identifiers = [monomial_identifiers + str('-target') for monomial_identifiers in list_of_monomial_identifiers_string]
lstsq_lambda_pred_polynomial_identifiers = [monomial_identifiers + str('-lstsq_lambda') for monomial_identifiers in list_of_monomial_identifiers_string]
lstsq_target_polynomial_identifiers = [monomial_identifiers + str('-lstsq_target') for monomial_identifiers in list_of_monomial_identifiers_string]
weight_identifiers = ['wb_' + str(i) for i in range(self.weights.shape[0])]
columns = list(flatten(['seed', target_polynomial_identifiers, lstsq_lambda_pred_polynomial_identifiers, lstsq_target_polynomial_identifiers, weight_identifiers]))
return columns
def return_model(self, config=None):
model = weights_to_model(self.weights, config)
return model
def split_LambdaNetDataset(dataset, test_split, random_seed='RANDOM_SEED'):
if random_seed == 'RANDOM_SEED':
random_seed = RANDOM_SEED
assert isinstance(dataset, LambdaNetDataset)
lambda_nets_list = dataset.lambda_net_list
if len(lambda_nets_list) == test_split:
return None, dataset
elif isinstance(test_split, int) or isinstance(test_split, float):
lambda_nets_train_list, lambda_nets_test_list = train_test_split(lambda_nets_list, test_size=test_split, random_state=random_seed)
elif isinstance(test_split, list):
lambda_nets_test_list = [lambda_nets_list[i] for i in test_split]
lambda_nets_train_list = list(set(lambda_nets_list) - set(lambda_nets_test_list))
#lambda_nets_train_list = lambda_nets_list.copy()
#for i in sorted(test_split, reverse=True):
# del lambda_nets_train_list[i]
assert len(lambda_nets_list) == len(lambda_nets_train_list) + len(lambda_nets_test_list)
return LambdaNetDataset(lambda_nets_train_list), LambdaNetDataset(lambda_nets_test_list)
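# Usage sketch for split_LambdaNetDataset (the dataset variable is hypothetical):
#   train_ds, test_ds = split_LambdaNetDataset(dataset, test_split=0.2)          # fractional split
#   train_ds, test_ds = split_LambdaNetDataset(dataset, test_split=[0, 5, 7])    # split by explicit indices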
def generate_base_model(): #without dropout
base_model = Sequential()
base_model.add(Dense(lambda_network_layers[0], activation='relu', input_dim=n))
for neurons in lambda_network_layers[1:]:
base_model.add(Dense(neurons, activation='relu'))
base_model.add(Dense(1))
return base_model
def shape_flat_weights(flat_weights, target_weights):
from utilities.utility_functions import flatten
#print('shape_flat_weights')
shaped_weights =[]
start = 0
for el in target_weights:
target_shape = el.shape
size = len(list(flatten(el)))
shaped_el = np.reshape(flat_weights[start:start+size], target_shape)
shaped_weights.append(shaped_el)
start += size
return shaped_weights
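# Worked example for shape_flat_weights (a sketch; the arrays are made up): the target
# list only supplies the shapes, its values are ignored.
#   target = [np.zeros((2, 3)), np.zeros(3)]
#   flat = np.arange(9.0)
#   shape_flat_weights(flat, target)  # -> [values 0..5 shaped (2, 3), values 6..8 shaped (3,)]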
def weights_to_pred(weights, x, base_model=None):
if base_model is None:
base_model = generate_base_model()
else:
base_model = tf.keras.models.clone_model(base_model)
base_model_weights = base_model.get_weights()
# Shape weights (flat) into correct model structure
shaped_weights = shape_flat_weights(weights, base_model_weights)
model = tf.keras.models.clone_model(base_model)
# Make prediction
model.set_weights(shaped_weights)
y = model.predict(x).ravel()
return y
def weights_to_model(weights, config=None, base_model=None):
#print('W-FUNCTION START')
if config != None:
globals().update(config)
if base_model is None:
base_model = Sequential()
#kerase defaults: kernel_initializer='glorot_uniform', bias_initializer='zeros'
if fixed_initialization_lambda_training:
base_model.add(Dense(lambda_network_layers[0], activation='relu', input_dim=n, kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))
else:
base_model.add(Dense(lambda_network_layers[0], activation='relu', input_dim=n))
if dropout > 0:
base_model.add(Dropout(dropout))
for neurons in lambda_network_layers[1:]:
if fixed_initialization_lambda_training:
base_model.add(Dense(neurons, activation='relu', kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))
else:
base_model.add(Dense(neurons, activation='relu'))
if dropout > 0:
base_model.add(Dropout(dropout))
if fixed_initialization_lambda_training:
            base_model.add(Dense(1, kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))  # current_seed comes from the config globals; self is not defined in this module-level function
else:
base_model.add(Dense(1))
else:
base_model = tf.keras.models.clone_model(base_model)
base_model_weights = base_model.get_weights()
# Shape weights (flat) into correct model structure
shaped_weights = shape_flat_weights(weights, base_model_weights)
model = tf.keras.models.clone_model(base_model)
model.set_weights(shaped_weights)
model.compile(optimizer=optimizer_lambda,
loss=loss_lambda,
metrics=[r2_keras_loss, 'mae', tf.keras.metrics.RootMeanSquaredError()])
return model
#######################################################################################################################################################
#################################################################Lambda Net TRAINING###################################################################
#######################################################################################################################################################
def train_nn(lambda_index,
X_data_lambda,
y_data_real_lambda,
polynomial,
seed_list,
callbacks=None,
return_history=False,
each_epochs_save=None,
printing=False,
return_model=False):
from utilities.utility_functions import generate_paths, calculate_function_values_from_polynomial, pairwise
global loss_lambda
global list_of_monomial_identifiers
if polynomial is not None:
paths_dict = generate_paths(path_type = 'lambda_net')
else:
paths_dict = generate_paths(path_type = 'interpretation_net')
current_seed = None
if fixed_seed_lambda_training or fixed_initialization_lambda_training:
current_seed = seed_list[lambda_index%number_different_lambda_trainings]
if fixed_seed_lambda_training:
random.seed(current_seed)
np.random.seed(current_seed)
if int(tf.__version__[0]) >= 2:
tf.random.set_seed(current_seed)
else:
tf.set_random_seed(current_seed)
if each_epochs_save_lambda != None:
epochs_save_range = range(1, epochs_lambda//each_epochs_save_lambda+1) if each_epochs_save_lambda == 1 else range(epochs_lambda//each_epochs_save_lambda+1)
else:
epochs_save_range = None
if isinstance(X_data_lambda, pd.DataFrame):
X_data_lambda = X_data_lambda.values
if isinstance(y_data_real_lambda, pd.DataFrame):
y_data_real_lambda = y_data_real_lambda.values
X_train_lambda_with_valid, X_test_lambda, y_train_real_lambda_with_valid, y_test_real_lambda = train_test_split(X_data_lambda, y_data_real_lambda, test_size=0.25, random_state=current_seed)
X_train_lambda, X_valid_lambda, y_train_real_lambda, y_valid_real_lambda = train_test_split(X_train_lambda_with_valid, y_train_real_lambda_with_valid, test_size=0.25, random_state=current_seed)
model = Sequential()
#kerase defaults: kernel_initializer='glorot_uniform', bias_initializer='zeros'
if fixed_initialization_lambda_training:
model.add(Dense(lambda_network_layers[0], activation='relu', input_dim=X_data_lambda.shape[1], kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))
else:
model.add(Dense(lambda_network_layers[0], activation='relu', input_dim=X_data_lambda.shape[1]))
if dropout > 0:
model.add(Dropout(dropout))
for neurons in lambda_network_layers[1:]:
if fixed_initialization_lambda_training:
model.add(Dense(neurons, activation='relu', kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))
else:
model.add(Dense(neurons, activation='relu'))
if dropout > 0:
model.add(Dropout(dropout))
if fixed_initialization_lambda_training:
model.add(Dense(1, kernel_initializer=tf.keras.initializers.GlorotUniform(seed=current_seed), bias_initializer='zeros'))
else:
model.add(Dense(1))
try:
loss_lambda = tf.keras.losses.get(loss_lambda)
except ValueError as error_message:
if loss_lambda == 'r2':
loss_lambda = r2_keras_loss
else:
print(error_message)
model.compile(optimizer=optimizer_lambda,
loss=loss_lambda,
metrics=[r2_keras_loss, 'mae', tf.keras.metrics.RootMeanSquaredError()]
)
if early_stopping_lambda:
if callbacks == None:
callbacks = []
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, min_delta=early_stopping_min_delta_lambda, verbose=0, mode='min', restore_best_weights=True)
callbacks.append(early_stopping)
weights = []
polynomial_lstsq_pred_list = []
polynomial_lstsq_true_list = []
lstsq_data = np.random.uniform(low=x_min, high=x_max, size=(random_evaluation_dataset_size, n)) #y_train_pred_lambda.ravel()
terms_matrix = generate_term_matric_for_lstsq(lstsq_data, list_of_monomial_identifiers)
terms_matrix_train = generate_term_matric_for_lstsq(X_train_lambda, list_of_monomial_identifiers)
#y_train_real_lambda = y_train_real_lambda.astype(float)
#y_valid_real_lambda = y_valid_real_lambda.astype(float)
#y_test_real_lambda = y_test_real_lambda.astype(float)
if each_epochs_save == None or each_epochs_save==epochs_lambda:
model_history = model.fit(X_train_lambda,
y_train_real_lambda,
epochs=epochs_lambda,
batch_size=batch_lambda,
callbacks=callbacks,
validation_data=(X_valid_lambda, y_valid_real_lambda),
verbose=0,
workers=0)
weights.append(model.get_weights())
history = model_history.history
y_train_pred_lambda = model.predict(X_train_lambda)
y_valid_pred_lambda = model.predict(X_valid_lambda)
y_test_pred_lambda = model.predict(X_test_lambda)
y_random_pred_lambda = model.predict(lstsq_data)
polynomial_lstsq_pred, _, _, _ = np.linalg.lstsq(terms_matrix, y_random_pred_lambda.ravel(), rcond=-1)#[::-1]
polynomial_lstsq_true, _, _, _ = np.linalg.lstsq(terms_matrix_train, y_train_real_lambda.ravel(), rcond=-1)#[::-1]
polynomial_lstsq_pred_list.append(polynomial_lstsq_pred)
polynomial_lstsq_true_list.append(polynomial_lstsq_true)
y_train_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_train_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_train_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_train_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_valid_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_valid_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_valid_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_valid_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_test_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_test_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_test_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_test_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
pred_list = {'lambda_index': lambda_index,
'y_train_real_lambda': y_train_real_lambda,
'y_train_pred_lambda': y_train_pred_lambda,
'y_train_pred_lambda_poly_lstsq': y_train_pred_lambda_poly_lstsq,
#y_train_real_lambda_poly_lstsq,
'X_train_lambda': X_train_lambda,
'y_valid_real_lambda': y_valid_real_lambda,
'y_valid_pred_lambda': y_valid_pred_lambda,
'y_valid_pred_lambda_poly_lstsq': y_valid_pred_lambda_poly_lstsq,
#y_valid_real_lambda_poly_lstsq,
'X_valid_lambda': X_valid_lambda,
'y_test_real_lambda': y_test_real_lambda,
'y_test_pred_lambda': y_test_pred_lambda,
'y_test_pred_lambda_poly_lstsq': y_test_pred_lambda_poly_lstsq,
#y_test_real_lambda_poly_lstsq,
'X_test_lambda': X_test_lambda}
scores_train, std_train, mean_train = evaluate_lambda_net('TRAIN', X_train_lambda, y_train_real_lambda, y_train_pred_lambda, y_train_pred_lambda_poly_lstsq, y_train_real_lambda_poly_lstsq)
scores_valid, std_valid, mean_valid = evaluate_lambda_net('VALID', X_valid_lambda, y_valid_real_lambda, y_valid_pred_lambda, y_valid_pred_lambda_poly_lstsq, y_valid_real_lambda_poly_lstsq)
scores_test, std_test, mean_test = evaluate_lambda_net('TEST', X_test_lambda, y_test_real_lambda, y_test_pred_lambda, y_test_pred_lambda_poly_lstsq, y_test_real_lambda_poly_lstsq)
scores_std = {}
for aDict in (std_train, std_valid, std_test):
scores_std.update(aDict)
scores_mean = {}
for aDict in (mean_train, mean_valid, mean_test):
scores_mean.update(aDict)
scores_list = [lambda_index,
scores_train,
scores_valid,
scores_test,
scores_std,
scores_mean]
else:
scores_list = []
pred_list = []
for i in epochs_save_range:
train_epochs_step = each_epochs_save if i > 1 else max(each_epochs_save-1, 1) if i==1 else 1
model_history = model.fit(X_train_lambda,
y_train_real_lambda,
epochs=train_epochs_step,
batch_size=batch_lambda,
callbacks=callbacks,
validation_data=(X_valid_lambda, y_valid_real_lambda),
verbose=0,
workers=1,
use_multiprocessing=False)
#history adjustment for continuing training
if i == 0 and each_epochs_save != 1 or i == 1 and each_epochs_save == 1:
history = model_history.history
else:
history = mergeDict(history, model_history.history)
weights.append(model.get_weights())
y_train_pred_lambda = model.predict(X_train_lambda)
y_valid_pred_lambda = model.predict(X_valid_lambda)
y_test_pred_lambda = model.predict(X_test_lambda)
y_random_pred_lambda = model.predict(lstsq_data)
polynomial_lstsq_pred, _, _, _ = np.linalg.lstsq(terms_matrix, y_random_pred_lambda.ravel(), rcond=-1)#[::-1]
#does not change over time
if i == 0 and each_epochs_save != 1 or i == 1 and each_epochs_save == 1:
polynomial_lstsq_true, _, _, _ = np.linalg.lstsq(terms_matrix_train, y_train_real_lambda.ravel(), rcond=-1)#[::-1]
polynomial_lstsq_pred_list.append(polynomial_lstsq_pred)
polynomial_lstsq_true_list.append(polynomial_lstsq_true)
y_train_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_train_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_valid_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_valid_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_test_pred_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_pred, X_test_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
if i == 0 and each_epochs_save != 1 or i == 1 and each_epochs_save == 1:
y_train_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_train_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_valid_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_valid_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
y_test_real_lambda_poly_lstsq = calculate_function_values_from_polynomial(polynomial_lstsq_true, X_test_lambda, force_complete_poly_representation=True, list_of_monomial_identifiers=list_of_monomial_identifiers)
pred_list.append({'lambda_index': lambda_index,
'y_train_real_lambda': y_train_real_lambda,
'y_train_pred_lambda': y_train_pred_lambda,
'y_train_pred_lambda_poly_lstsq': y_train_pred_lambda_poly_lstsq,
#y_train_real_lambda_poly_lstsq,
'X_train_lambda': X_train_lambda,
'y_valid_real_lambda': y_valid_real_lambda,
'y_valid_pred_lambda': y_valid_pred_lambda,
'y_valid_pred_lambda_poly_lstsq': y_valid_pred_lambda_poly_lstsq,
#y_valid_real_lambda_poly_lstsq,
'X_valid_lambda': X_valid_lambda,
'y_test_real_lambda': y_test_real_lambda,
'y_test_pred_lambda': y_test_pred_lambda,
'y_test_pred_lambda_poly_lstsq': y_test_pred_lambda_poly_lstsq,
#y_test_real_lambda_poly_lstsq,
'X_test_lambda': X_test_lambda})
scores_train, std_train, mean_train = evaluate_lambda_net('TRAIN', X_train_lambda, y_train_real_lambda, y_train_pred_lambda, y_train_pred_lambda_poly_lstsq, y_train_real_lambda_poly_lstsq)
scores_valid, std_valid, mean_valid = evaluate_lambda_net('VALID', X_valid_lambda, y_valid_real_lambda, y_valid_pred_lambda, y_valid_pred_lambda_poly_lstsq, y_valid_real_lambda_poly_lstsq)
scores_test, std_test, mean_test = evaluate_lambda_net('TEST', X_test_lambda, y_test_real_lambda, y_test_pred_lambda, y_test_pred_lambda_poly_lstsq, y_test_real_lambda_poly_lstsq)
scores_std = {}
for aDict in (std_train, std_valid, std_test):
scores_std.update(aDict)
scores_mean = {}
for aDict in (mean_train, mean_valid, mean_test):
scores_mean.update(aDict)
scores_list_single_epoch = [lambda_index,
scores_train,
scores_valid,
scores_test,
scores_std,
scores_mean]
scores_list.append(scores_list_single_epoch)
if printing and polynomial is not None:
for i, (weights_for_epoch, polynomial_lstsq_pred_for_epoch, polynomial_lstsq_true_for_epoch) in enumerate(zip(weights, polynomial_lstsq_pred_list, polynomial_lstsq_true_list)):
directory = './data/weights/weights_' + paths_dict['path_identifier_lambda_net_data'] + '/'
if each_epochs_save == None or each_epochs_save==epochs_lambda:
path_weights = directory + 'weights_epoch_' + str(epochs_lambda).zfill(3) + '.txt'
else:
index = (i+1)*each_epochs_save if each_epochs_save==1 else i*each_epochs_save if i > 1 else each_epochs_save if i==1 else 1
path_weights = directory + 'weights_epoch_' + str(index).zfill(3) + '.txt'
with open(path_weights, 'a') as text_file:
text_file.write(str(lambda_index))
text_file.write(', ' + str(current_seed))
for i, value in enumerate(polynomial.values):
text_file.write(', ' + str(value))
for value in polynomial_lstsq_pred_for_epoch:
text_file.write(', ' + str(value))
for value in polynomial_lstsq_true_for_epoch:
text_file.write(', ' + str(value))
for layer_weights, biases in pairwise(weights_for_epoch): #clf.get_weights()
for neuron in layer_weights:
for weight in neuron:
text_file.write(', ' + str(weight))
for bias in biases:
text_file.write(', ' + str(bias))
text_file.write('\n')
text_file.close()
path_X_data = directory + 'lambda_X_test_data.txt'
path_y_data = directory + 'lambda_y_test_data.txt'
with open(path_X_data, 'a') as text_file:
text_file.write(str(lambda_index))
for row in X_test_lambda:
for value in row:
text_file.write(', ' + str(value))
text_file.write('\n')
text_file.close()
with open(path_y_data, 'a') as text_file:
text_file.write(str(lambda_index))
for value in y_test_real_lambda.flatten():
text_file.write(', ' + str(value))
text_file.write('\n')
text_file.close()
if return_model:
return (lambda_index, current_seed, polynomial, polynomial_lstsq_pred_list, polynomial_lstsq_true_list), scores_list, pred_list, history, model
elif return_history:
return (lambda_index, current_seed, polynomial, polynomial_lstsq_pred_list, polynomial_lstsq_true_list), scores_list, pred_list, history, #polynomial_lstsq_pred_list, polynomial_lstsq_true_list#, weights, history
else:
return (lambda_index, current_seed, polynomial, polynomial_lstsq_pred_list, polynomial_lstsq_true_list), scores_list, pred_list#, weights
|
from time import time
import subprocess
import os
def lambda_handler(event, context):
file_size = event['file_size']
byte_size = int(event['byte_size'])
file_write_path = '/tmp/file'
start = time()
with open(file_write_path, 'wb', buffering=byte_size) as f:
f.write(os.urandom(file_size * 1024 * 1024))
f.flush()
os.fsync(f.fileno())
disk_write_latency = time() - start
disk_write_bandwidth = file_size / disk_write_latency
output = subprocess.check_output(['ls', '-alh', '/tmp/'])
print(output)
start = time()
with open(file_write_path, 'rb', buffering=byte_size) as f:
byte = f.read(byte_size)
        while byte != b"":  # f.read() returns b"" at EOF in binary mode
byte = f.read(byte_size)
disk_read_latency = time() - start
disk_read_bandwidth = file_size / disk_read_latency
rm = subprocess.Popen(['rm', '-rf', file_write_path])
rm.communicate()
return {
'disk_write_bandwidth':disk_write_bandwidth,
'disk_write_latency':disk_write_latency,
'disk_read_bandwidth':disk_read_bandwidth,
'disk_read_latency':disk_read_latency
}
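# Minimal local smoke test (a sketch, not part of the AWS entry point; the event values
# below are made up - on AWS the handler is invoked by the Lambda runtime):
if __name__ == "__main__":
    print(lambda_handler({'file_size': 16, 'byte_size': 1024 * 1024}, None))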
|
import threading  # threading library for multithreading
import socket  # socket library for TCP/IP
import chess  # chess engine library
import json  # json library to handle JSON
import random  # random library to generate random numbers
import time  # time library
import os  # operating system library
linux = False
def clear():  # Define the function that clears the terminal
    os.system("clear" if linux else "cls")  # Run clear on Linux, cls on Windows
class Game(threading.Thread):  # Define a new "Game" class
    def __init__(self):  # Initialisation
        threading.Thread.__init__(self)
        self.board = chess.Board()  # Create a board
        self.id = (int(time.time()*100) ^ random.randint(1, 1000))  # Generate a key with a xor
        # Initialise the variables
        self.isWhiteUsed = None
        self.isBlackUsed = None
        self.tour = 0  # %2==0 means it is white to play
    def run(self):
        while (self.isWhiteUsed is None) or (self.isBlackUsed is None):  # Wait until both players are connected
            pass
        self.begin()  # Start the game
    def getColor(self, conn):  # Function that assigns a colour
        if(self.isWhiteUsed is None and self.isBlackUsed is None):  # Neither white nor black is taken yet
            x = random.randint(0, 1)  # Pick at random
            if x:  # 1 was drawn, so black
                self.isBlackUsed = conn  # Store the connection
                return 1  # Report that black was chosen
            else:  # Otherwise white was drawn
                self.isWhiteUsed = conn  # Store the connection
                return -1  # Report that white was chosen
        elif(self.isBlackUsed is None):  # Black is free (so white is already taken)
            self.isBlackUsed = conn  # Store the connection
            return 1  # The player will be black
        elif(self.isWhiteUsed is None):  # White is free (so black is already taken)
            self.isWhiteUsed = conn  # Store the connection
            return -1  # The player will be white
        else:  # Two players are already present
            return 0  # Return an error
    def move(self, color, moveName):  # Function that plays a piece
        if not(self.isWhiteUsed is None) and not(self.isBlackUsed is None):  # Are both players present?
            if color == (-1 if self.tour%2==0 else 1):  # Is it this player's turn?
                if not(self.board.is_game_over()):
                    if len(moveName) == 4:
                        if chess.Move.from_uci(moveName) in self.board.legal_moves:  # Is the move legal?
                            m = chess.Move.from_uci(moveName)  # Build the move
                            self.board.push(m)  # Apply it to the board
                            self.tour += 1  # Increment the turn counter
                            if color == -1:  # If white
                                self.isWhiteUsed.send(json.dumps({"error": 0}).encode())  # report that there is no error
                            else:  # Otherwise
                                self.isBlackUsed.send(json.dumps({"error": 0}).encode())  # report that there is no error
                            return 0  # Report success
                        else:  # The move is illegal
                            if not(0 if self.tour%2==0 else 1):  # If it is white to play
                                self.isWhiteUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 4, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 4 back
                            else:  # If it is black to play
                                self.isBlackUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 4, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 4 back
                            return 4  # Return the error
                    else:
                        if not(0 if self.tour%2==0 else 1):  # If it is white to play
                            self.isWhiteUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 4, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 4 back
                        else:  # If it is black to play
                            self.isBlackUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 4, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 4 back
                        return 4  # Return the error
                else:  # The game is over (checkmate)
                    pass  # We simply do not care
            message = {"board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}  # Build the board payload
            message["over"] = self.board.is_game_over()  # Add whether the game is over
            self.isBlackUsed.send(json.dumps(message).encode())  # Send the message to the black client
            self.isWhiteUsed.send(json.dumps(message).encode())  # Send the message to the white client
        else:  # One of the players is not connected
            self.isWhiteUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 3, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 3
            self.isBlackUsed.send(json.dumps({"over": self.board.is_game_over(), "error": 3, "board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}).encode())  # Send error 3
            return 3  # Error 3 (errors.txt)
    def begin(self):
        message = {"board": self.board.fen(), "is2Play": (-1 if self.tour%2==0 else 1)}  # Build the board payload
        self.isBlackUsed.send(json.dumps(message).encode())  # Send the message to the black client
        self.isWhiteUsed.send(json.dumps(message).encode())  # Send the message to the white client
class Supervisor(threading.Thread):
    def __init__(self, parent, parentOfParent):
        threading.Thread.__init__(self)
        self.parent = parentOfParent
        self._self = parent
    def run(self):
        print(self.parent.connections)
        message = self._self.conn.recv(2048)  # Receive the client message saying whether we create or join a game
        message = json.loads(message.decode())  # Convert the JSON-formatted string into a dict
        if message["party"] == "new":  # Creating a game
            gInst = Game()  # Create a new Game instance
            gInst.start()  # Start the thread
            self._self.conn.send(json.dumps({"id": gInst.id}).encode())  # Send the game ID
            self._self.parent.games[gInst.id] = gInst  # Register the game engine under its id
            self._self.parent.connections[-1].gameObject = gInst  # Attach it to the connection
            self._self.parent.connections[-1].gameId = gInst.id  # Along with the game ID
        elif message["party"] == "join":  # Joining a game
            _pass = True  # Result of the try:
            link = 0  # To avoid problems
            try:  # Try
                link = int(message["link"])  # to convert to int
            except:  # If something goes wrong
                _pass = False  # Remember it
            if link in self.parent.games and _pass:  # If the game id is known and there was no error
                self._self.conn.send(json.dumps({"error": 0}).encode())  # Tell the client there is no error
                self._self.parent.connections[-1].gameObject = self._self.parent.games[link]  # Attach it to the connection
                self._self.parent.connections[-1].gameId = self._self.parent.games[link].id  # Along with the game ID
            else:  # Otherwise
                self._self.conn.send(json.dumps({"error": 5}).encode())  # Error (for a change)
class Connection(threading.Thread):  # Multithreaded class handling ONE TCP/IP connection
    def __init__(self, conn, addr, id, parent):  # Initialisation
        threading.Thread.__init__(self)  # Configure the thread
        # Copy the variables
        self.conn = conn
        self.addr = addr
        self.id = id
        # Initialise the variables
        self.gameId = None
        self.gameObject = None
        self.black_white = None
        self.stop = False  # Used to stop the thread
        self.parent = parent
        self.s = Supervisor(self, self.parent)
    def run(self):  # Thread entry point
        self.s.start()
        self.s.join()
        print("Supervisor finished "+str(self.addr))
        while (self.gameId is None) or (self.gameObject is None):  # Wait for a game ID
            pass
        print("GameObject satisfied "+str(self.addr))
        self.black_white = self.gameObject.getColor(self.conn)  # Get the colour
        if(self.black_white == 0):  # 0 means the game is already full
            self.conn.send(json.dumps({"error": 1}).encode())  # So send code 1 (errors.txt) to the client
        else:  # A colour was assigned
            self.conn.send(json.dumps({"error": 0, "color": self.black_white}).encode())  # Tell the client everything is fine
            while (self.gameObject.isWhiteUsed is None) or (self.gameObject.isBlackUsed is None):  # Wait until both players are connected
                pass
            print("Game satisfied "+str(self.addr))
            while not self.stop:  # While the thread is not stopped
                message = self.conn.recv(2048)  # Receive a message from the client
                print("163"+message.decode())  # Debug :)
                if message.decode() != "":  # If it is not empty
                    message = json.loads(message.decode())  # Decode it
                    if(message["move"] is not None):  # If a move is requested
                        result = self.gameObject.move(self.black_white, message["move"])  # Call the move function
                        renvoi = {}  # Build the reply
                        renvoi["over"] = self.gameObject.board.is_game_over()  # Add whether the game is over
                        renvoi["board"] = self.gameObject.board.fen()  # Return the board
                        renvoi["error"] = result  # Return the error code
                        print("172"+str(renvoi))  # Debug ;)
                        # self.conn.send(json.dumps(renvoi).encode())  # Send the reply over the client's own connection.
                        self.gameObject.isWhiteUsed.send(json.dumps(renvoi).encode())  # Send the message
                        self.gameObject.isBlackUsed.send(json.dumps(renvoi).encode())  # Same here
                        self.gameObject.begin()  # Reuse begin() even though we really should not XD
                    else:  # The message contains no move
                        self.conn.send(json.dumps({"error": 4}).encode())  # Send the error
                else:  # Otherwise
                    message = {"move": None}  # Ignore it and avoid bugs
class MainThread(threading.Thread):  # Multithreaded class that is the heart of the system
    def __init__(self, port=554, ip="127.0.0.1"):  # Initialisation
        threading.Thread.__init__(self)  # Configure the thread
        # Copy the port variable, but not too globally :)
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create the socket
        self.sock.bind((ip, self.port))  # Bind the socket to the port
        # Initialise the variables
        self.stop = False
        self.accept = True
        self.games = {}
        self.connections = []
    def run(self):  # Main loop
        self.sock.listen(1)  # Start listening
        while not self.stop:  # While the thread is not finished
            if self.accept:  # If we accept connections:
                try:  # To avoid errors on a rough shutdown
                    conn, addr = self.sock.accept()  # Wait for an incoming connection and accept it
                except:  # If there are errors
                    break  # Quit
                print("New connection from "+str(addr))  # Debug
                self.connections.append(Connection(conn, addr, len(self.connections), self))  # Open a new Connection instance and store it in self.connections
                self.connections[-1].start()  # Start the connection instance
        for conn in self.connections:  # For every connection
            conn.conn.close()  # Goodbye
            conn.stop = True  # Stop the thread
            conn.join()  # Wait for it to stop
if __name__ == "__main__":  # If the script is run directly rather than imported by another Python program
    clear()  # Clear the terminal
    port = 554  # Define the port
    hostname = socket.gethostname()  # Get the computer name
    IPAddr = socket.gethostbyname(hostname)  # and its IP address
    print("Server started on port "+str(port))  # For those who do not want to read the docs ;)
    mt = MainThread(port=port, ip=IPAddr)  # Create a MainThread instance on the chosen port
    mt.start()  # and start it
    stop = False  # Define the stop flag
    while not(stop):  # As long as we do not stop
        m = input("$ ")  # Ask for a command
        if "stop" in m:  # If "stop" was entered
            stop = True  # Stop
            mt.stop = True  # Stop the thread
            mt.sock.close()  # Shut the server down violently thanks to some wonderful errors
            mt.join()  # Wait for the thread to finish
            print("Stop")  # Print stop
            exit(1)  # Quit
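# Client-side protocol sketch, reconstructed from the handlers above (comments only;
# host, port and the move string are examples):
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect(("127.0.0.1", 554))
#   sock.send(json.dumps({"party": "new"}).encode())          # or {"party": "join", "link": <game id>}
#   game_id = json.loads(sock.recv(2048).decode())["id"]      # answer to "new": the game id
#   color = json.loads(sock.recv(2048).decode())["color"]     # -1 = white, 1 = black
#   board = json.loads(sock.recv(2048).decode())["board"]     # FEN position, sent once both players joined
#   sock.send(json.dumps({"move": "e2e4"}).encode())          # UCI move; the server answers with board/error updates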
|
import re
import unicodedata
import numpy as np
import torch
def get_ner_BIO(label_list):
# list_len = len(word_list)
# assert(list_len == len(label_list)), "word list size unmatch with label list"
list_len = len(label_list)
begin_label = 'B'
inside_label = 'I'
whole_tag = ''
index_tag = ''
tag_list = []
stand_matrix = []
for i in range(0, list_len):
# wordlabel = word_list[i]
current_label = label_list[i].upper()
if begin_label in current_label:
if index_tag == '':
# whole_tag = 'answer' + '[' + str(i)
whole_tag = '[' + str(i)
index_tag = 'answer'
# whole_tag = current_label.replace(begin_label,"",1) +'[' +str(i)
# index_tag = current_label.replace(begin_label,"",1)
else:
tag_list.append(whole_tag + ',' + str(i-1))
# whole_tag = 'answer' + '[' + str(i)
whole_tag = '[' + str(i)
index_tag = 'answer'
# whole_tag = current_label.replace(begin_label,"",1) + '[' + str(i)
# index_tag = current_label.replace(begin_label,"",1)
elif inside_label in current_label:
if 'answer' == index_tag:
whole_tag = whole_tag
else:
if (whole_tag != '')&(index_tag != ''):
tag_list.append(whole_tag + ',' + str(i-1))
whole_tag = ''
index_tag = ''
else:
if (whole_tag != '')&(index_tag != ''):
tag_list.append(whole_tag + ',' + str(i-1))
whole_tag = ''
index_tag = ''
# if (whole_tag != '')&(index_tag != ''):
# tag_list.append(whole_tag)
if whole_tag !='' and ',' not in whole_tag:
tag_list.append(whole_tag + ',' + str(list_len - 1))
tag_list_len = len(tag_list)
for i in range(0, tag_list_len):
if len(tag_list[i]) > 0:
tag_list[i] = tag_list[i] + ']'
insert_list = reverse_style(tag_list[i])
stand_matrix.append(eval(insert_list))
return stand_matrix
def reverse_style(input_string):
target_position = input_string.index('[')
input_len = len(input_string)
output_string = input_string[target_position:input_len] + input_string[0:target_position]
return output_string
def is_string(s):
"""判断是否是字符串
"""
return isinstance(s, str)
def padding(seqs, is_float=False, batch_first=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
seq_tensor = torch.FloatTensor(batch_length, len(seqs)).fill_(float(0)) if is_float \
else torch.LongTensor(batch_length, len(seqs)).fill_(0)
for i, s in enumerate(seqs):
end_seq = lengths[i]
seq_tensor[:end_seq, i].copy_(s[:end_seq])
if batch_first:
seq_tensor = seq_tensor.t()
return seq_tensor, lengths
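# Example (a sketch): padding([[1, 2, 3], [4, 5]]) returns a zero-padded LongTensor of
# shape (3, 2) holding the sequences column-wise, plus the lengths [3, 2]; with
# batch_first=True the tensor is transposed to shape (2, 3).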
def mpn_padding(seqs, label, class_num, is_float=False, use_bert=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
o1_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
o2_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
for i, label_ in enumerate(label):
for attr in label_:
if use_bert:
o1_tensor[i, attr.value_pos_start + 1, attr.attr_type_id] = 1
o2_tensor[i, attr.value_pos_end, attr.attr_type_id] = 1
else:
o1_tensor[i, attr.value_pos_start, attr.attr_type_id] = 1
o2_tensor[i, attr.value_pos_end - 1, attr.attr_type_id] = 1
return o1_tensor, o2_tensor
def spo_padding(seqs, label, class_num, is_float=False, use_bert=False):
lengths = [len(s) for s in seqs]
seqs = [torch.Tensor(s) for s in seqs]
batch_length = max(lengths)
o1_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
o2_tensor = torch.FloatTensor(len(seqs), batch_length, class_num).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num).fill_(0)
for i, label_ in enumerate(label):
for po in label_:
if use_bert:
o1_tensor[i, po.object_start + 1, po.predict_type_id] = 1
o2_tensor[i, po.object_end, po.predict_type_id] = 1
else:
o1_tensor[i, po.object_start, po.predict_type_id] = 1
o2_tensor[i, po.object_end - 1, po.predict_type_id] = 1
return o1_tensor, o2_tensor
def _handle_pos_limit(pos, limit=30):
for i, p in enumerate(pos):
if p > limit:
pos[i] = limit
if p < -limit:
pos[i] = -limit
return [p + limit + 1 for p in pos]
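# Example (a sketch): with the default limit of 30, relative positions are clipped to
# [-30, 30] and then shifted to non-negative indices, e.g.
#   _handle_pos_limit([-40, -5, 0, 45]) -> [1, 26, 31, 61]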
def find_position(entity_name, text):
start = text.find(entity_name, 0)
return start, start + len(entity_name)
class BasicTokenizer(object):
"""分词器基类
"""
def __init__(self, do_lower_case=False):
"""初始化
"""
self._token_pad = '[PAD]'
self._token_cls = '[CLS]'
self._token_sep = '[SEP]'
self._token_unk = '[UNK]'
self._token_mask = '[MASK]'
self._do_lower_case = do_lower_case
def tokenize(self, text, add_cls=True, add_sep=True, max_length=None):
"""分词函数
"""
if self._do_lower_case:
text = unicodedata.normalize('NFD', text)
text = ''.join(
[ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
tokens = self._tokenize(text)
if add_cls:
tokens.insert(0, self._token_cls)
if add_sep:
tokens.append(self._token_sep)
if max_length is not None:
self.truncate_sequence(max_length, tokens, None, -2)
return tokens
def token_to_id(self, token):
"""token转换为对应的id
"""
raise NotImplementedError
def tokens_to_ids(self, tokens):
"""token序列转换为对应的id序列
"""
return [self.token_to_id(token) for token in tokens]
def truncate_sequence(self,
max_length,
first_sequence,
second_sequence=None,
pop_index=-1):
"""截断总长度
"""
if second_sequence is None:
second_sequence = []
while True:
total_length = len(first_sequence) + len(second_sequence)
if total_length <= max_length:
break
elif len(first_sequence) > len(second_sequence):
first_sequence.pop(pop_index)
else:
second_sequence.pop(pop_index)
def encode(self,
first_text,
second_text=None,
max_length=None,
first_length=None,
second_length=None):
"""输出文本对应token id和segment id
如果传入first_length,则强行padding第一个句子到指定长度;
同理,如果传入second_length,则强行padding第二个句子到指定长度。
"""
if is_string(first_text):
first_tokens = self.tokenize(first_text)
else:
first_tokens = first_text
if second_text is None:
second_tokens = None
elif is_string(second_text):
second_tokens = self.tokenize(second_text, add_cls=False)
else:
second_tokens = second_text
if max_length is not None:
self.truncate_sequence(max_length, first_tokens, second_tokens, -2)
first_token_ids = self.tokens_to_ids(first_tokens)
if first_length is not None:
first_token_ids = first_token_ids[:first_length]
first_token_ids.extend([self._token_pad_id] *
(first_length - len(first_token_ids)))
first_segment_ids = [0] * len(first_token_ids)
if second_text is not None:
second_token_ids = self.tokens_to_ids(second_tokens)
if second_length is not None:
second_token_ids = second_token_ids[:second_length]
second_token_ids.extend(
[self._token_pad_id] *
(second_length - len(second_token_ids)))
second_segment_ids = [1] * len(second_token_ids)
first_token_ids.extend(second_token_ids)
first_segment_ids.extend(second_segment_ids)
return first_token_ids, first_segment_ids
def id_to_token(self, i):
"""id序列为对应的token
"""
raise NotImplementedError
def ids_to_tokens(self, ids):
"""id序列转换为对应的token序列
"""
return [self.id_to_token(i) for i in ids]
def decode(self, ids):
"""转为可读文本
"""
raise NotImplementedError
def _tokenize(self, text):
"""基本分词函数
"""
raise NotImplementedError
class Tokenizer(BasicTokenizer):
"""Bert原生分词器
纯Python实现,代码修改自keras_bert的tokenizer实现
"""
def __init__(self, token_dict, do_lower_case=False):
"""初始化
"""
super(Tokenizer, self).__init__(do_lower_case)
if is_string(token_dict):
token_dict = load_vocab(token_dict)
self._token_dict = token_dict
self._token_dict_inv = {v: k for k, v in token_dict.items()}
for token in ['pad', 'cls', 'sep', 'unk', 'mask']:
try:
_token_id = token_dict[getattr(self, '_token_%s' % token)]
setattr(self, '_token_%s_id' % token, _token_id)
except:
pass
self._vocab_size = len(token_dict)
def token_to_id(self, token):
"""token转换为对应的id
"""
return self._token_dict.get(token, self._token_unk_id)
def id_to_token(self, i):
"""id转换为对应的token
"""
return self._token_dict_inv[i]
def decode(self, ids, tokens=None):
"""转为可读文本
"""
tokens = tokens or self.ids_to_tokens(ids)
tokens = [token for token in tokens if not self._is_special(token)]
text, flag = '', False
for i, token in enumerate(tokens):
if token[:2] == '##':
text += token[2:]
elif len(token) == 1 and self._is_cjk_character(token):
text += token
elif len(token) == 1 and self._is_punctuation(token):
text += token
text += ' '
elif i > 0 and self._is_cjk_character(text[-1]):
text += token
else:
text += ' '
text += token
text = re.sub(' +', ' ', text)
text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text)
punctuation = self._cjk_punctuation() + '+-/={(<['
punctuation_regex = '|'.join([re.escape(p) for p in punctuation])
punctuation_regex = '(%s) ' % punctuation_regex
text = re.sub(punctuation_regex, '\\1', text)
text = re.sub('(\d\.) (\d)', '\\1\\2', text)
return text.strip()
def _tokenize(self, text):
"""基本分词函数
"""
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
tokens = []
for word in spaced.strip().split():
tokens.extend(self._word_piece_tokenize(word))
return tokens
def _word_piece_tokenize(self, word):
"""word内分成subword
"""
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
@staticmethod
def _is_space(ch):
"""空格类字符判断
"""
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \
unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
@staticmethod
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
@staticmethod
def _is_special(ch):
"""判断是不是有特殊含义的符号
"""
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
def load_vocab(dict_path, encoding='utf-8', simplified=False, startwith=None):
"""从bert的词典文件中读取词典
"""
token_dict = {}
with open(dict_path, encoding=encoding) as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
    if simplified:  # filter out redundant tokens
new_token_dict, keep_tokens = {}, []
startwith = startwith or []
for t in startwith:
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
for t, _ in sorted(token_dict.items(), key=lambda s: s[1]):
if t not in new_token_dict:
keep = True
if len(t) > 1:
for c in (t[2:] if t[:2] == '##' else t):
if (Tokenizer._is_cjk_character(c)
or Tokenizer._is_punctuation(c)):
keep = False
break
if keep:
new_token_dict[t] = len(new_token_dict)
keep_tokens.append(token_dict[t])
return new_token_dict, keep_tokens
else:
return token_dict
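# Usage sketch for the tokenizer (the vocab path is hypothetical; any BERT-style vocab.txt works):
#   token_dict = load_vocab('vocab.txt')
#   tokenizer = Tokenizer(token_dict, do_lower_case=True)
#   token_ids, segment_ids = tokenizer.encode('first sentence', 'second sentence')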
def search(pattern, sequence):
"""从sequence中寻找子串pattern
如果找到,返回第一个下标;否则返回-1。
"""
n = len(pattern)
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
return i
return -1
def sequence_padding(inputs, length=None, padding=0, is_float=False):
"""Numpy函数,将序列padding到同一长度
"""
if length is None:
length = max([len(x) for x in inputs])
outputs = np.array([
np.concatenate([x, [padding] * (length - len(x))])
if len(x) < length else x[:length] for x in inputs
])
out_tensor = torch.FloatTensor(outputs) if is_float \
else torch.LongTensor(outputs)
    return out_tensor
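# Example (a sketch): sequence_padding([[1, 2, 3], [4, 5]]) pads row-wise to the longest
# sequence and returns LongTensor([[1, 2, 3], [4, 5, 0]]); length=2 truncates instead,
# and is_float=True yields a FloatTensor.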
def batch_gather(data: torch.Tensor, index: torch.Tensor):
length = index.shape[0]
t_index = index.cpu().numpy()
t_data = data.cpu().data.numpy()
result = []
for i in range(length):
result.append(t_data[i, t_index[i], :])
return torch.from_numpy(np.array(result)).to(data.device)
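# Example (a sketch): for data of shape (batch, seq_len, hidden) and index of shape
# (batch, k), batch_gather returns a (batch, k, hidden) tensor with
# result[i, j] == data[i, index[i, j]], computed via numpy and moved back to data.device.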
def select_padding(seqs, select, is_float=False, class_num=None):
lengths = [len(s) for s in seqs]
batch_length = max(lengths)
seq_tensor = torch.FloatTensor(len(seqs), batch_length, class_num, batch_length).fill_(float(0)) if is_float \
else torch.LongTensor(len(seqs), batch_length, class_num, batch_length).fill_(0)
# NA = BAIDU_SELECT['NA']
# seq_tensor[:, :, NA, :] = 1
for i, triplet_list in enumerate(select):
for triplet in triplet_list:
subject_pos = triplet[0]
object_pos = triplet[1]
predicate = triplet[2]
seq_tensor[i, subject_pos, predicate, object_pos] = 1
# seq_tensor[i, subject_pos, NA, object_pos] = 0
return seq_tensor
if __name__=='__main__':
a = ['O', 'B', 'I', 'I', 'O', 'O', 'O', 'B', 'I']
print(get_ner_BIO(a))
|
import os
from pathlib import Path
import numpy as np
from padertorch.contrib.examples.audio_tagging.data import get_datasets
from padertorch.contrib.examples.audio_tagging.models import CRNN
from paderbox.utils.timer import timeStamped
from padertorch.contrib.je.modules.augment import (
MelWarping, LogTruncNormalSampler, TruncExponentialSampler
)
from padertorch.train.optimizer import Adam
from padertorch.train.trainer import Trainer
from sacred import Experiment as Exp
from sacred.commands import print_config
from sacred.observers import FileStorageObserver
nickname = 'audio_tagging'
ex = Exp(nickname)
storage_dir = str(
Path(os.environ['STORAGE_ROOT']) / nickname / timeStamped('')[1:]
)
observer = FileStorageObserver.create(storage_dir)
ex.observers.append(observer)
@ex.config
def config():
resume = False
# Data configuration
audio_reader = {
'source_sample_rate': None,
'target_sample_rate': 44100,
}
stft = {
'shift': 882,
'window_length': 2*882,
'size': 2048,
'fading': None,
'pad': False,
}
batch_size = 24
num_workers = 8
prefetch_buffer = 10 * batch_size
max_total_size = None
max_padding_rate = 0.1
bucket_expiration = 1000 * batch_size
# Trainer configuration
trainer = {
'model': {
'factory': CRNN,
'feature_extractor': {
'sample_rate': audio_reader['target_sample_rate'],
'fft_length': stft['size'],
'n_mels': 128,
'warping_fn': {
'factory': MelWarping,
'alpha_sampling_fn': {
'factory': LogTruncNormalSampler,
'scale': .07,
'truncation': np.log(1.3),
},
'fhi_sampling_fn': {
'factory': TruncExponentialSampler,
'scale': .5,
'truncation': 5.,
},
},
'max_resample_rate': 1.,
'n_time_masks': 1,
'max_masked_time_steps': 70,
'max_masked_time_rate': .2,
'n_mel_masks': 1,
'max_masked_mel_steps': 16,
'max_masked_mel_rate': .2,
'max_noise_scale': .0,
},
'cnn_2d': {
'out_channels': [16, 16, 32, 32, 64, 64, 128, 128, 256],
'pool_size': [1, 2, 1, 2, 1, 2, 1, (2, 1), (2, 1)],
# 'residual_connections': [None, 3, None, 5, None, 7, None],
'output_layer': False,
'kernel_size': 3,
'norm': 'batch',
'activation_fn': 'relu',
# 'pre_activation': True,
'dropout': .0,
},
'cnn_1d': {
'out_channels': 3*[512],
# 'residual_connections': [None, 3, None],
'input_layer': False,
'output_layer': False,
'kernel_size': 3,
'norm': 'batch',
'activation_fn': 'relu',
# 'pre_activation': True,
'dropout': .0,
},
'rnn_fwd': {
'hidden_size': 512,
'num_layers': 2,
'dropout': .0,
},
'clf_fwd': {
'out_channels': [512, 527],
'input_layer': False,
'kernel_size': 1,
'norm': 'batch',
'activation_fn': 'relu',
'dropout': .0,
},
'rnn_bwd': {
'hidden_size': 512,
'num_layers': 2,
'dropout': .0,
},
'clf_bwd': {
'out_channels': [512, 527],
'input_layer': False,
'kernel_size': 1,
'norm': 'batch',
'activation_fn': 'relu',
'dropout': .0,
},
},
'optimizer': {
'factory': Adam,
'lr': 3e-4,
'gradient_clipping': 20.,
},
'storage_dir': storage_dir,
'summary_trigger': (100, 'iteration'),
'checkpoint_trigger': (1000, 'iteration'),
'stop_trigger': (100000, 'iteration')
}
Trainer.get_config(trainer)
@ex.automain
def train(
_run,
audio_reader, stft,
num_workers, batch_size, max_padding_rate,
trainer, resume,
):
print_config(_run)
trainer = Trainer.from_config(trainer)
train_iter, validation_iter = get_datasets(
audio_reader=audio_reader, stft=stft,
num_workers=num_workers,
batch_size=batch_size,
max_padding_rate=max_padding_rate,
storage_dir=trainer.storage_dir
)
trainer.test_run(train_iter, validation_iter)
trainer.register_validation_hook(
validation_iter, metric='macro_fscore', maximize=True
)
trainer.train(train_iter, resume=resume)
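# Launch sketch (assuming this file is run as a script and STORAGE_ROOT is set; sacred's
# "with" syntax overrides config entries from the command line, the values are examples):
#   export STORAGE_ROOT=/path/to/storage
#   python train.py with batch_size=16 trainer.optimizer.lr=1e-4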
|
from shared.card import CARD_PLACEHOLDER_LENGTH
import pygame
import pygame.locals as pl
from twisted.logger import Logger
class View(object):
log = Logger()
def __init__(self, display):
self.display = display
self.first_call = False
self.tab_order = []
self.tab_position = 0
if self.display.accessibility:
from .speech import Speaker
self.speaker = Speaker()
def update(self):
if not self.first_call:
self.firstUpdate()
self.first_call = True
def render(self):
pass
def handleEvent(self, event):
if len(self.tab_order) == 0:
return
if event.type == pygame.KEYDOWN:
if event.key == pl.K_TAB:
try:
self.tab_order[self.tab_position].setFocus(False)
except AttributeError:
pass
# some weird problem here
# after restoring the focus of the window by tabbing back into
# it, the mod attribute won't be set correctly
# that's why we will try to guess it here in a different way
if pygame.key.get_mods() & pl.KMOD_LSHIFT == pl.KMOD_LSHIFT or \
pygame.key.get_mods() & pl.KMOD_RSHIFT == \
pl.KMOD_RSHIFT:
self.tab_position -= 1
if self.tab_position < 0:
self.tab_position = len(self.tab_order) - 1
else:
self.tab_position += 1
if self.tab_position >= len(self.tab_order):
self.tab_position = 0
self.speak(self.tab_order[self.tab_position].getLabel(), True)
try:
self.tab_order[self.tab_position].setFocus(True)
except AttributeError:
pass
            elif event.key == pl.K_LCTRL or event.key == pl.K_RCTRL:
self.speak(self.tab_order[self.tab_position].getLabel(), True)
elif event.key == pl.K_RETURN:
try:
if self.tab_order[self.tab_position].getEnable():
self.tab_order[self.tab_position].getCallback()()
sound = self.tab_order[self.tab_position].getClickSound()
sound.stop()
sound.play()
except (AttributeError, TypeError):
pass
def speak(self, text, interrupt=False):
if not self.display.accessibility:
return
self.speaker.output(text.replace('_'*CARD_PLACEHOLDER_LENGTH, "("+self.display.translator.translate("free space")+")"), interrupt)
    # will only be called once the view receives its first update
def firstUpdate(self):
if len(self.tab_order) == 0:
return
self.speak(self.tab_order[0].getLabel(), False)
try:
self.tab_order[0].setFocus(True)
except AttributeError:
pass
# can be overwritten
# will be called each time the view will be removed
def leave(self):
pass
def setNewTabPosition(self, position):
self.tab_position = position
def getTabOrder(self):
return self.tab_order
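    # Subclass sketch (the widgets and display object are hypothetical; it only illustrates
    # the hooks this base class expects and how tab_order feeds handleEvent):
    #   class MenuView(View):
    #       def __init__(self, display):
    #           super().__init__(display)
    #           self.tab_order = [start_button, quit_button]  # focusable widgets
    #       def render(self):
    #           ...  # draw the widgets each frame
    #       def leave(self):
    #           ...  # clean up when the view is removed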
|
from sympy import Matrix
from numbers import Number
from CompartmentalSystems.helpers_reservoir import numsol_symbolical_system, numerical_function_from_expression ,make_cut_func_set,f_of_t_maker
from copy import copy,deepcopy
from testinfrastructure.helpers import pe
from scipy.interpolate import interp1d, UnivariateSpline
import numpy as np
class BastinModel():
# Bastonification of a reservoir model
def __init__(self,srm,u_expr,z_sym):
self.z_sym=z_sym
self.u_expr=u_expr
crm=deepcopy(srm)
cof= crm.output_fluxes
cif= crm.input_fluxes
# up to now we can only built
# single input single output models
assert(len(cof)==1)
assert(len(cif)==1)
F_SD=list(cof.values())[0]
cif= crm.input_fluxes
#index of the single input receiving pool
ii=list(cif.items())[0][0]
d=cif[ii]
cif[ii] = u_expr*d
crm.input_fluxes=cif
self.state_vector=Matrix(list(srm.state_vector)+[z_sym])
z_dot=F_SD-u_expr*d
#rhs
self.F=Matrix(list(crm.F)+[z_dot])
self.time_symbol=srm.time_symbol
self.srm=crm
class BastinModelRun():
def __init__(self,bm,par_dict, start_values, times, func_dict):
self.bm=bm
self.par_dict=par_dict
self.start_values=start_values
self.times=times
self.func_dict=func_dict
def solve(self):
bm=self.bm
soln = numsol_symbolical_system(
bm.state_vector,
bm.time_symbol,
bm.F,
[self.par_dict],
[self.func_dict],
self.start_values,
self.times
)
return soln[0]
def _flux_funcs(self, expr_dict):
bm = self.bm
srm=bm.srm
sol_funcs = self.sol_funcs()
flux_funcs = {}
tup = tuple(bm.state_vector) + (bm.time_symbol,)
for key, expr in expr_dict.items():
if isinstance(expr,Number):
                # If expr is a plain number (e.g. 5.1), lambdify would not create a
                # vectorized function: the output would be a scalar rather than an
                # array of identical values, which breaks plotting.
                # Bind expr as a default argument so each closure keeps its own value.
                def expr_func(arg_arr, expr=expr):
                    return expr * np.ones_like(arg_arr)
                flux_funcs[key] = expr_func
else:
ol=numerical_function_from_expression(expr,tup,self.par_dict,self.func_dict)
flux_funcs[key] = f_of_t_maker(sol_funcs, ol)
return flux_funcs
def sol_funcs(self):
"""Return linearly interpolated solution functions.
Returns:
            list: one interpolating function per pool; each function maps
            ``times`` to a numpy.array of that pool's contents at those times.
"""
sol = self.solve()
times = self.times
sol_funcs = []
for i in range(sol.shape[1]):
sol_inter = interp1d(times, sol[:,i])
sol_funcs.append(sol_inter)
return sol_funcs
def external_input_flux_funcs(self):
"""Return a dictionary of the external input fluxes.
        The resulting functions are based on sol_funcs and are linear interpolations.
Returns:
dict: ``{key: func}`` with ``key`` representing the pool which
receives the input and ``func`` a function of time that returns
a ``float``.
"""
return self._flux_funcs(self.bm.srm.input_fluxes)
def internal_flux_funcs(self):
"""Return a dictionary of the internal fluxes.
        The resulting functions are based on sol_funcs and are linear interpolations.
Returns:
dict: ``{key: func}`` with ``key=(pool_from, pool_to)`` representing
the pools involved and ``func`` a function of time that returns
a ``float``.
"""
return self._flux_funcs(self.bm.srm.internal_fluxes)
def external_output_flux_funcs(self):
"""Return a dictionary of the external output fluxes.
        The resulting functions are based on sol_funcs and are linear interpolations.
Returns:
dict: ``{key: func}`` with ``key`` representing the pool from which
the output comes and ``func`` a function of time that returns a
``float``.
"""
return self._flux_funcs(self.bm.srm.output_fluxes)
def phi_num(self,tup):
bm=self.bm
u_num= numerical_function_from_expression(bm.u_expr,tup,self.par_dict,self.func_dict)
return u_num
|
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import stat
import shutil
import subprocess
import sys
import zipfile
import _jsonnet
import requests
import yaml
from ._globals import HELM_CHARTS
TEMPLATES = [
"charts/namespace.yaml",
"charts/prometheus",
"charts/promtail",
"charts/loki",
"charts/grafana",
"promtail",
]
HELM_REPOS = {
"grafana": "https://grafana.github.io/helm-charts",
"loki": "https://grafana.github.io/loki/charts",
"prometheus-community": "https://prometheus-community.github.io/helm-charts",
}
LOOSE_RESOURCES = [
"namespace.yaml",
"configuration",
"dashboards",
"storage",
]
def _create_dashboard_configmaps(output_dir, namespace):
dashboards_dir = os.path.abspath("./dashboards")
output_dir = os.path.join(output_dir, "dashboards")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for dir_path, _, files in os.walk(dashboards_dir):
for dashboard in files:
dashboard_path = os.path.join(dir_path, dashboard)
dashboard_name, ext = os.path.splitext(dashboard)
if ext == ".json":
source = f"--from-file={dashboard_path}"
elif ext == ".jsonnet":
json = _jsonnet.evaluate_file(dashboard_path, ext_codes={"publish": "false"})
source = f"--from-literal={dashboard_name}.json='{json}'"
else:
continue
output_file = f"{output_dir}/{dashboard_name}.dashboard.yaml"
command = (
f"kubectl create configmap {dashboard_name} -o yaml "
f"{source} --dry-run=client --namespace={namespace} "
f"> {output_file}"
)
try:
subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError as err:
print(err.output)
with open(output_file, "r") as f:
dashboard_cm = yaml.load(f, Loader=yaml.SafeLoader)
dashboard_cm["metadata"]["labels"] = dict()
dashboard_cm["metadata"]["labels"]["grafana_dashboard"] = dashboard_name
dashboard_cm["data"][f"{dashboard_name}.json"] = dashboard_cm["data"][
f"{dashboard_name}.json"
].replace('"${DS_PROMETHEUS}"', "null")
with open(output_file, "w") as f:
yaml.dump(dashboard_cm, f)
def _create_promtail_configs(config, output_dir):
if not os.path.exists(os.path.join(output_dir, "promtail")):
os.mkdir(os.path.join(output_dir, "promtail"))
with open(os.path.join(output_dir, "promtailLocalConfig.yaml")) as f:
for promtail_config in yaml.load_all(f, Loader=yaml.SafeLoader):
with open(
os.path.join(
output_dir,
"promtail",
"promtail-%s"
% promtail_config["scrape_configs"][0]["static_configs"][0][
"labels"
]["host"],
),
"w",
) as f:
yaml.dump(promtail_config, f)
os.remove(os.path.join(output_dir, "promtailLocalConfig.yaml"))
if not config["tls"]["skipVerify"]:
try:
with open(
os.path.join(output_dir, "promtail", "promtail.ca.crt"), "w"
) as f:
f.write(config["tls"]["caCert"])
except TypeError:
print("CA certificate for TLS verification has to be given.")
def _download_promtail(output_dir):
with open(os.path.abspath("./promtail/VERSION"), "r") as f:
promtail_version = f.readlines()[0].strip()
output_dir = os.path.join(output_dir, "promtail")
output_zip = os.path.join(output_dir, "promtail.zip")
response = requests.get(
"https://github.com/grafana/loki/releases/download/v%s/promtail-linux-amd64.zip"
% promtail_version,
stream=True,
)
with open(output_zip, "wb") as f:
for chunk in response.iter_content(chunk_size=512):
f.write(chunk)
with zipfile.ZipFile(output_zip) as f:
f.extractall(output_dir)
promtail_exe = os.path.join(output_dir, "promtail-linux-amd64")
os.chmod(
promtail_exe,
os.stat(promtail_exe).st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH,
)
os.remove(output_zip)
def _run_ytt(config, output_dir):
config_string = "#@data/values\n---\n"
config_string += yaml.dump(config)
command = [
"ytt",
]
for template in TEMPLATES:
command += ["-f", template]
command += [
"--output-files",
output_dir,
"--ignore-unknown-comments",
"-f",
"-",
]
try:
# pylint: disable=E1123
print(subprocess.check_output(command, input=config_string, text=True))
except subprocess.CalledProcessError as err:
print(err.output)
def _update_helm_repos():
for repo, url in HELM_REPOS.items():
command = ["helm", "repo", "add", repo, url]
try:
subprocess.check_output(" ".join(command), shell=True)
except subprocess.CalledProcessError as err:
print(err.output)
try:
print(subprocess.check_output(["helm", "repo", "update"]).decode("utf-8"))
except subprocess.CalledProcessError as err:
print(err.output)
def _deploy_loose_resources(output_dir):
for resource in LOOSE_RESOURCES:
command = [
"kubectl",
"apply",
"-f",
f"{output_dir}/{resource}",
]
print(subprocess.check_output(command).decode("utf-8"))
def _get_installed_charts_in_namespace(namespace):
command = ["helm", "ls", "-n", namespace, "--short"]
return subprocess.check_output(command).decode("utf-8").split("\n")
def _install_or_update_charts(output_dir, namespace):
installed_charts = _get_installed_charts_in_namespace(namespace)
charts_path = os.path.abspath("./charts")
for chart, repo in HELM_CHARTS.items():
chart_name = chart + "-" + namespace
with open(f"{charts_path}/{chart}/VERSION", "r") as f:
chart_version = f.readlines()[0].strip()
command = ["helm"]
command.append("upgrade" if chart_name in installed_charts else "install")
command += [
chart_name,
repo,
"--version",
chart_version,
"--values",
f"{output_dir}/{chart}.yaml",
"--namespace",
namespace,
]
try:
print(subprocess.check_output(command).decode("utf-8"))
except subprocess.CalledProcessError as err:
print(err.output)
def install(config_manager, output_dir, dryrun, update_repo):
"""Create the final configuration for the helm charts and Kubernetes resources
and install them to Kubernetes, if not run in --dryrun mode.
Arguments:
config_manager {AbstractConfigManager} -- ConfigManager that contains the
            configuration of the monitoring setup to be installed.
output_dir {string} -- Path to the directory where the generated files
            should be saved in
dryrun {boolean} -- Whether the installation will be run in dryrun mode
update_repo {boolean} -- Whether to update the helm repositories locally
"""
config = config_manager.get_config()
if not os.path.exists(output_dir):
os.mkdir(output_dir)
elif os.listdir(output_dir):
while True:
response = input(
(
"Output directory already exists. This may lead to file conflicts "
"and unwanted configuration applied to the cluster. Do you want "
"to empty the directory? [y/n] "
)
)
if response == "y":
shutil.rmtree(output_dir)
os.mkdir(output_dir)
break
if response == "n":
print("Aborting installation. Please provide empty directory.")
sys.exit(1)
print("Unknown input.")
_run_ytt(config, output_dir)
namespace = config_manager.get_config()["namespace"]
_create_dashboard_configmaps(output_dir, namespace)
if os.path.exists(os.path.join(output_dir, "promtailLocalConfig.yaml")):
_create_promtail_configs(config, output_dir)
if not dryrun:
_download_promtail(output_dir)
if not dryrun:
if update_repo:
_update_helm_repos()
_deploy_loose_resources(output_dir)
_install_or_update_charts(output_dir, namespace)
|
import multiprocessing
from time import time
import socket
import websocket
from socscrollsave.core.constants import Constants
from socscrollsave.common.logger import Logger as Log
TAG = "Socket"
class SocketProcess(multiprocessing.Process):
"""
Socket client
"""
def __init__(self, parser_process):
"""
Initialises values for process.
:param parser_process: Reference to a ParserProcess instance.
:type parser_process: ParserProcess
"""
multiprocessing.Process.__init__(self)
self._exit = multiprocessing.Event()
self._parser = parser_process
self._socket_client = websocket.WebSocket()
Log.i(TAG, "Process Ready")
    def open(self, port='ger', speed='million', timeout=0.01):
        """
        Opens the websocket connection.
        The port, speed and timeout arguments are currently unused.
        :return: True if the connection was established, False on timeout.
        """
        try:
self._socket_client.connect("ws://192.168.4.1:81/")
Log.i(TAG, "Socket open")
return True
except TimeoutError:
Log.w(TAG, "Error")
return False
def run(self):
"""
Reads the socket until a stop call is made.
:return:
"""
buffer_size = 20
Log.i(TAG, "Process starting...")
timestamp = time()
while not self._exit.is_set():
stamp = time() - timestamp
try:
data = self._socket_client.recv()
#print(data)
if len(data) > 0:
self._parser.add([stamp, data])
            except Exception:
                print("Data_error")
Log.i(TAG, "Process finished")
def stop(self):
"""
Signals the process to stop acquiring data.
:return:
"""
Log.i(TAG, "Process finishing...")
self._socket_client.close()
self._exit.set()
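# Minimal smoke-test sketch (not part of the original module). The real code
# passes a ParserProcess; any object exposing add(list) works for run(), so a
# tiny stand-in is used here. Assumes the websocket server at ws://192.168.4.1:81/
# is reachable.
if __name__ == "__main__":
    import time as _time
    class _PrintParser:
        def add(self, item):
            print(item)
    proc = SocketProcess(_PrintParser())
    if proc.open():
        proc.start()
        _time.sleep(5)  # collect data for a few seconds
        proc.stop()
        proc.join()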
|
try:
from src import __wallaby_local as w # for VSCode support
except ImportError:
import imp; wallaby = imp.load_source('wallaby', '/usr/lib/wallaby.py')
import wallaby as w # so it works on actual robot
from src.helpers.functions import map
MOTOR_MAX_TIME = 5.0
"""This is multiplied by `motor.speed` to achieve the time in
seconds required to move the motor.
"""
class base_motor:
"""The base class for motors and servos. Provides a common initializer for
both by providing a port and speed value.
"""
def __init__(self, port, speed):
"""Creates a new base_motor.
Arguments:
port {int} -- The GPIO port of the motor. See Wallaby documentation for details.
speed {float} -- The speed of the motor. 0.0 is the slowest, 1.0 is the fastest.
"""
self.port = port
self.speed = speed
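# Illustrative sketch (not in the original file): how MOTOR_MAX_TIME and the
# speed attribute combine, per the MOTOR_MAX_TIME docstring above. The class
# and method names are hypothetical.
class example_motor(base_motor):
    def move_time(self):
        """Time in seconds the motor is driven, i.e. MOTOR_MAX_TIME * speed."""
        return MOTOR_MAX_TIME * self.speed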
|
# Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 10.2.0
from . import AWSObject
from . import AWSProperty
from .validators import boolean
from .validators import integer
class EBSStorageInfo(AWSProperty):
props = {
'VolumeSize': (integer, False),
}
class StorageInfo(AWSProperty):
props = {
'EBSStorageInfo': (EBSStorageInfo, False),
}
class BrokerNodeGroupInfo(AWSProperty):
props = {
'BrokerAZDistribution': (basestring, False),
'ClientSubnets': ([basestring], True),
'InstanceType': (basestring, True),
'SecurityGroups': ([basestring], False),
'StorageInfo': (StorageInfo, False),
}
class Tls(AWSProperty):
props = {
'CertificateAuthorityArnList': ([basestring], False),
}
class ClientAuthentication(AWSProperty):
props = {
'Tls': (Tls, False),
}
class ConfigurationInfo(AWSProperty):
props = {
'Arn': (basestring, True),
'Revision': (integer, True),
}
class EncryptionAtRest(AWSProperty):
props = {
'DataVolumeKMSKeyId': (basestring, True),
}
class EncryptionInTransit(AWSProperty):
props = {
'ClientBroker': (basestring, False),
'InCluster': (boolean, False),
}
class EncryptionInfo(AWSProperty):
props = {
'EncryptionAtRest': (EncryptionAtRest, False),
'EncryptionInTransit': (EncryptionInTransit, False),
}
class JmxExporter(AWSProperty):
props = {
'EnabledInBroker': (boolean, True),
}
class NodeExporter(AWSProperty):
props = {
'EnabledInBroker': (boolean, True),
}
class Prometheus(AWSProperty):
props = {
'JmxExporter': (JmxExporter, False),
'NodeExporter': (NodeExporter, False),
}
class OpenMonitoring(AWSProperty):
props = {
'Prometheus': (Prometheus, True),
}
class Firehose(AWSProperty):
props = {
'DeliveryStream': (basestring, True),
"Enabled": (boolean, True),
}
class CloudWatchLogs(AWSProperty):
props = {
'Enabled': (boolean, True),
'LogGroup': (basestring, False),
}
class S3(AWSProperty):
props = {
'Bucket': (basestring, False),
'Enabled': (boolean, True),
'Prefix': (basestring, False),
}
class BrokerLogs(AWSProperty):
props = {
'CloudWatchLogs': (CloudWatchLogs, False),
'Firehose': (Firehose, False),
'S3': (S3, False),
}
class LoggingInfo(AWSProperty):
props = {
'BrokerLogs': (BrokerLogs, True),
}
class Cluster(AWSObject):
resource_type = "AWS::MSK::Cluster"
props = {
'BrokerNodeGroupInfo': (BrokerNodeGroupInfo, True),
'ClientAuthentication': (ClientAuthentication, False),
'ClusterName': (basestring, True),
'ConfigurationInfo': (ConfigurationInfo, False),
'EncryptionInfo': (EncryptionInfo, False),
'EnhancedMonitoring': (basestring, False),
'KafkaVersion': (basestring, True),
'LoggingInfo': (LoggingInfo, False),
'NumberOfBrokerNodes': (integer, True),
'OpenMonitoring': (OpenMonitoring, False),
'Tags': (dict, False),
}
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import unittest
from commoncode import version
class TestVersionHint(unittest.TestCase):
def test_version_hint(self):
data = {
'/xmlgraphics/fop/source/fop-1.0-src.zip': '1.0',
'/xml/xindice/xml-xindice-1.2m1-src.zip': '1.2m1',
'/xmlgraphics/fop/binaries/fop-0.94-bin-jdk1.3.tar.gz': '0.94',
'/xmlgraphics/batik/batik-src-1.7beta1.zip': '1.7beta1',
'/xmlgraphics/batik/batik-1.7-jre13.zip': '1.7',
'/xmlbeans/source/xmlbeans-2.3.0-src.tgz': '2.3.0',
'/xml/xindice/source/xml-xindice-1.2m1-src.tar.gz': '1.2m1',
'/xml/xerces-p/binaries/XML-Xerces-2.3.0-4-win32.zip': '2.3.0-4',
'/xml/xerces-p/source/XML-Xerces-2.3.0-3.tar.gz': '2.3.0-3',
'/xml/xalan-j/source/xalan-j_2_7_0-src-2jars.tar.gz': '2_7_0',
'/xml/security/java-library/xml-security-src-1_0_5D2.zip': '1_0_5D2',
'/xml/commons/binaries/xml-commons-external-1.4.01-bin.zip': '1.4.01',
'/xml/commons/xml-commons-1.0.b2.zip': '1.0.b2',
'/xml/cocoon/3.0/cocoon-all-3.0.0-alpha-1-dist.tar.gz': '3.0.0-alpha-1',
'/xerces/j/source/Xerces-J-tools.2.10.0-xml-schema-1.1-beta.tar.gz': '2.10.0',
'/xerces/c/3/binaries/xerces-c-3.1.1-x86_64-solaris-cc-5.10.tar.gz': '3.1.1',
'/xerces/c/3/binaries/xerces-c-3.1.1-x86_64-windows-vc-8.0.zip': '3.1.1',
'/xerces/c/2/binaries/xerces-c_2_8_0-x86-windows-vc_7_1.zip': '2_8_0',
'/ws/woden/1.0M8/apache-woden-src-1.0M8.tar.gz': '1.0M8',
'/ws/scout/0_7rc1/source/scout-0.7rc1-src.zip': '0.7rc1',
'/ws/juddi/3_0/juddi-portal-bundle-3.0.0.rc1.zip': '3.0.0.rc1',
'/ws/juddi/3_0/juddi-portal-bundle-3.0.0.beta.zip': '3.0.0.beta',
'/ws/juddi/2_0RC7/juddi-tomcat-2.0rc7.zip': '2.0rc7',
'/ws/axis2/tools/1_4_1/axis2-wsdl2code-maven-plugin-1.4.1.jar': '1.4.1',
'/ws/axis/1_4/axis-src-1_4.zip': '1_4',
'/ws/axis-c/source/win32/axis-c-1.6b-Win32-trace-src.zip': '1.6b',
'/tuscany/java/sca/2.0-M5/apache-tuscany-sca-all-2.0-M5-src.tar.gz': '2.0-M5',
'/turbine/turbine-2.3.3-rc1/source/turbine-2.3.3-RC1-src.zip': '2.3.3-RC1',
'/tomcat/tomcat-connectors/jk/binaries/win64/jk-1.2.30/ia64/symbols-1.2.30.zip': '1.2.30',
'/tomcat/tomcat-7/v7.0.0-beta/bin/apache-tomcat-7.0.0-windows-i64.zip': '7.0.0',
'/tomcat/tomcat-4/v4.1.40/bin/apache-tomcat-4.1.40-LE-jdk14.exe': '4.1.40',
'/tapestry/tapestry-src-5.1.0.5.tar.gz': '5.1.0.5',
'/spamassassin/source/Mail-SpamAssassin-rules-3.3.0.r901671.tgz': '3.3.0.r901671',
'/spamassassin/Mail-SpamAssassin-rules-3.3.1.r923257.tgz': '3.3.1.r923257',
'/shindig/1.1-BETA5-incubating/shindig-1.1-BETA5-incubating-source.zip': '1.1-BETA5',
'/servicemix/nmr/1.0.0-m3/apache-servicemix-nmr-1.0.0-m3-src.tar.gz': '1.0.0-m3',
'/qpid/0.6/qpid-dotnet-0-10-0.6.zip': '0.6',
'/openjpa/2.0.0-beta/apache-openjpa-2.0.0-beta-binary.zip': '2.0.0-beta',
'/myfaces/source/portlet-bridge-2.0.0-alpha-2-src-all.tar.gz': '2.0.0-alpha-2',
'/myfaces/source/myfaces-extval20-2.0.3-src.tar.gz': '2.0.3',
'/harmony/milestones/6.0/debian/amd64/harmony-6.0-classlib_0.0r946981-1_amd64.deb': '6.0',
'/geronimo/eclipse/updates/plugins/org.apache.geronimo.st.v21.ui_2.1.1.jar': '2.1.1',
'/directory/studio/update/1.x/plugins/org.apache.directory.studio.aciitemeditor_1.5.2.v20091211.jar': '1.5.2.v20091211',
'/db/torque/torque-3.3/source/torque-gen-3.3-RC3-src.zip': '3.3-RC3',
'/cayenne/cayenne-3.0B1.tar.gz': '3.0B1',
'/cayenne/cayenne-3.0M4-macosx.dmg': '3.0M4',
'/xmlgraphics/batik/batik-docs-current.zip': 'current',
'/xmlgraphics/batik/batik-docs-previous.zip': 'previous',
'/poi/dev/bin/poi-bin-3.7-beta1-20100620.zip': '3.7-beta1-20100620',
'/excalibur/avalon-logkit/source/excalibur-logkit-2.0.dev-0-src.zip': '2.0.dev-0',
'/db/derby/db-derby-10.4.2.0/derby_core_plugin_10.4.2.zip': '10.4.2',
'/httpd/modpython/win/2.7.1/mp152dll.zip': '2.7.1',
'/perl/mod_perl-1.31/apaci/mod_perl.config.sh': '1.31',
'/xml/xerces-j/old_xerces2/Xerces-J-bin.2.0.0.alpha.zip': '2.0.0.alpha',
'/xml/xerces-p/archives/XML-Xerces-1.7.0_0.tar.gz': '1.7.0_0',
'/httpd/docs/tools-2004-05-04.zip': '2004-05-04',
'/ws/axis2/c/M0_5/axis2c-src-M0.5.tar.gz': 'M0.5',
'/jakarta/poi/dev/src/jakarta-poi-1.8.0-dev-src.zip': '1.8.0-dev',
'/tapestry/tapestry-4.0-beta-8.zip': '4.0-beta-8',
'/openejb/3.0-beta-1/openejb-3.0-beta-1.zip': '3.0-beta-1',
'/tapestry/tapestry-4.0-rc-1.zip': '4.0-rc-1',
'/jakarta/tapestry/source/3.0-rc-3/Tapestry-3.0-rc-3-src.zip': '3.0-rc-3',
'/jakarta/lucene/binaries/lucene-1.3-final.tar.gz': '1.3-final',
'/jakarta/tapestry/binaries/3.0-beta-1a/Tapestry-3.0-beta-1a-bin.zip': '3.0-beta-1a',
'/poi/release/bin/poi-bin-3.0-FINAL-20070503.tar.gz': '3.0-FINAL-20070503',
'/harmony/milestones/M4/apache-harmony-hdk-r603534-linux-x86-32-libstdc++v6-snapshot.tar.gz': 'r603534',
'/ant/antidote/antidote-20050330.tar.bz2': '20050330',
'/apr/not-released/apr_20020725223645.tar.gz': '20020725223645',
'/ibatis/source/ibatis.net/src-revision-709676.zip': 'revision-709676',
'/ws/axis-c/source/win32/axis-c-src-1-2-win32.zip': '1-2',
'/jakarta/slide/most-recent-2.0rc1-binaries/jakarta-slide 2.0rc1 jakarta-tomcat-4.1.30.zip': '2.0rc1',
'/httpd/modpython/win/3.0.1/python2.2.1-apache2.0.43.zip': '2.2.1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.1.0.cr1_20090319213629.jar': '2.1.0.cr1_20090319213629',
'/jakarta/poi/dev/bin/poi-2.0-pre1-20030517.jar': '2.0-pre1-20030517',
'/jakarta/poi/release/bin/jakarta-poi-1.5.0-FINAL-bin.zip': '1.5.0-FINAL',
'/jakarta/poi/release/bin/poi-bin-2.0-final-20040126.zip': '2.0-final-20040126',
'/activemq/apache-activemq/5.0.0/apache-activemq-5.0.0-sources.jar': '5.0.0',
'/turbine/turbine-2.2/source/jakarta-turbine-2.2-B1.tar.gz': '2.2-B1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.0.0.cr1.jar': '2.0.0.cr1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.0.0.final_20090108225011.jar': '2.0.0.final_20090108225011',
'/ws/axis/1_2RC3/axis-src-1_2RC3.zip': '1_2RC3',
'/commons/lang/old/v1.0-b1.1/commons-lang-1.0-b1.1.zip': '1.0-b1.1',
'/commons/net/binaries/commons-net-1.2.0-release.tar.gz': '1.2.0-release',
'/ant/ivyde/2.0.0.final/apache-ivyde-2.0.0.final-200907011148-RELEASE.tgz': '2.0.0.final-200907011148-RELEASE',
'/geronimo/eclipse/updates/plugins/org.apache.geronimo.jetty.j2ee.server.v11_1.0.0.jar': 'v11_1.0.0',
'/jakarta/cactus/binaries/jakarta-cactus-13-1.7.1-fixed.zip': '1.7.1-fixed',
'/jakarta/jakarta-turbine-maven/maven/jars/maven-1.0-b5-dev.20020731.085427.jar': '1.0-b5-dev.20020731.085427',
'/xml/xalan-j/source/xalan-j_2_5_D1-src.tar.gz': '2_5_D1',
'/ws/woden/IBuilds/I20051002_1145/woden-I20051002_1145.tar.bz2': 'I20051002_1145',
'/commons/beanutils/source/commons-beanutils-1.8.0-BETA-src.tar.gz': '1.8.0-BETA',
'/cocoon/BINARIES/cocoon-2.0.3-vm14-bin.tar.gz': '2.0.3-vm14',
'/felix/xliff_filters_v1_2_7_unix.jar': 'v1_2_7',
'/excalibur/releases/200702/excalibur-javadoc-r508111-15022007.tar.gz': 'r508111-15022007',
'/geronimo/eclipse/updates/features/org.apache.geronimo.v20.feature_2.0.0.jar': 'v20.feature_2.0.0',
'/geronimo/2.1.6/axis2-jaxws-1.3-G20090406.jar': '1.3-G20090406',
'/cassandra/debian/pool/main/c/cassandra/cassandra_0.4.0~beta1-1.diff.gz': '0.4.0~beta1-1',
'/ha-api-3.1.6.jar': '3.1.6',
'ha-api-3.1.6.jar': '3.1.6'
}
# FIXME: generate a test function for each case
for path in data:
expected = data[path]
if not expected.lower().startswith('v'):
expected = 'v ' + expected
assert expected == version.hint(path)
|
"""Tests for the object departures module."""
import responses
# initializes the test package and avoids name clashes
import test as _test
import navitia_client
import requests
class DeparturesTest(_test.TestCase):
def setUp(self):
self.user = 'leo'
self.core_url = "https://api.navitia.io/v1/"
self.client = navitia_client.Client(self.user)
self.coords = '2.333333;48.866667'
def test_no_region_nor_coords(self):
# Should raise error if no region nor coords specified
pass
|
"""
cloudRemover.py, Sam Murphy (2017-07-11)
Collection of cloud removal methods for Sentinel 2 and Landsat
for details: https://github.com/samsammurphy/cloud-masking-sentinel2
"""
import ee
import math
def ESAclouds(toa):
"""
European Space Agency (ESA) clouds from 'QA60', i.e. Quality Assessment band at 60m
parsed by Nick Clinton
"""
qa = toa.select('QA60')
# bits 10 and 11 are clouds and cirrus
cloudBitMask = int(2**10)
cirrusBitMask = int(2**11)
# both flags set to zero indicates clear conditions.
clear = qa.bitwiseAnd(cloudBitMask).eq(0).And(\
qa.bitwiseAnd(cirrusBitMask).eq(0))
# cloud is not clear
cloud = clear.eq(0)
return cloud
def shadowMask(toa,cloudMask):
"""
Finds cloud shadows in images
Originally by Gennadii Donchyts, adapted by Ian Housman
"""
def potentialShadow(cloudHeight):
"""
Finds potential shadow areas from array of cloud heights
returns an image stack (i.e. list of images)
"""
cloudHeight = ee.Number(cloudHeight)
# shadow vector length
shadowVector = zenith.tan().multiply(cloudHeight)
# x and y components of shadow vector length
x = azimuth.cos().multiply(shadowVector).divide(nominalScale).round()
y = azimuth.sin().multiply(shadowVector).divide(nominalScale).round()
# affine translation of clouds
cloudShift = cloudMask.changeProj(cloudMask.projection(), cloudMask.projection().translate(x, y)) # could incorporate shadow stretch?
return cloudShift
# solar geometry (radians)
azimuth = ee.Number(toa.get('solar_azimuth')).multiply(math.pi).divide(180.0).add(ee.Number(0.5).multiply(math.pi))
zenith = ee.Number(0.5).multiply(math.pi ).subtract(ee.Number(toa.get('solar_zenith')).multiply(math.pi).divide(180.0))
# find potential shadow areas based on cloud and solar geometry
nominalScale = cloudMask.projection().nominalScale()
cloudHeights = ee.List.sequence(500,4000,500)
potentialShadowStack = cloudHeights.map(potentialShadow)
potentialShadow = ee.ImageCollection.fromImages(potentialShadowStack).max()
# shadows are not clouds
potentialShadow = potentialShadow.And(cloudMask.Not())
# (modified) dark pixel detection
darkPixels = toa.normalizedDifference(['green', 'swir2']).gt(0.25)
# shadows are dark
shadow = potentialShadow.And(darkPixels).rename(['shadows'])
# might be scope for one last check here. Dark surfaces (e.g. water, basalt, etc.) cause shadow commission errors.
# perhaps using a NDWI (e.g. green and nir)
return shadow
#
class CloudRemover:
ESAclouds = ESAclouds
shadowMask = shadowMask
def sentinel2mask(img):
"""
Masks cloud (and shadow) pixels from Sentinel 2 image
"""
# top of atmosphere reflectance
toa = img.select(['B1','B2','B3','B4','B6','B8A','B9','B10', 'B11','B12'],\
['aerosol', 'blue', 'green', 'red', 'red2','red4','h2o', 'cirrus','swir1', 'swir2'])\
.divide(10000).addBands(img.select('QA60'))\
.set('solar_azimuth',img.get('MEAN_SOLAR_AZIMUTH_ANGLE'))\
.set('solar_zenith',img.get('MEAN_SOLAR_ZENITH_ANGLE'))
# ESA clouds
ESAcloud = CloudRemover.ESAclouds(toa)
# Shadow
shadow = CloudRemover.shadowMask(toa, ESAcloud)
# cloud and shadow mask
mask = ESAcloud.Or(shadow).eq(0)
return img.updateMask(mask)
def landsatMask(img):
"""
Masks cloud (and shadow) pixels from Landsat images
"""
# FMASK
fmask = img.select('fmask')
# cloud and shadow
cloud = fmask.eq(4)
shadow = fmask.eq(2)
# cloudFree pixels are not cloud or shadow
cloudFree = cloud.Or(shadow).eq(0)
return img.updateMask(cloudFree)
def fromMission(mission):
switch = {
'sentinel2': CloudRemover.sentinel2mask,
'landsat8': CloudRemover.landsatMask,
'landsat7': CloudRemover.landsatMask,
'landsat5': CloudRemover.landsatMask,
'landsat4': CloudRemover.landsatMask,
}
return switch[mission.lower()]
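# Illustrative usage sketch (not part of the original module). Assumes the
# Earth Engine client has been initialised (ee.Initialize()) and uses the
# public Sentinel 2 TOA collection id as an example.
def example_sentinel2_cloud_removal():
    collection = ee.ImageCollection('COPERNICUS/S2')
    masker = CloudRemover.fromMission('sentinel2')
    return collection.map(masker)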
|
# models.py
# cavaliba.app_sirene
# (C) Cavaliba - 2020
from datetime import datetime, timedelta
from django.db import models
from django.utils import timezone
from django.forms import ModelForm
# Field conventions:
#   null  : database-level (allow NULL in the DB)
#   blank : form-level (field may be left empty in forms)
#   char/text fields : keep null=False (store empty strings, not NULL)
# -------------------------------------------------------------
# Contacts
# -------------------------------------------------------------
class Contact(models.Model):
email = models.CharField('Email', max_length=128, blank=False)
mobile = models.CharField('GSM', max_length=15, blank=False)
firstname = models.CharField('Prenom', max_length=128, blank=True)
lastname = models.CharField('Nom', max_length=128, blank=True)
is_active = models.BooleanField('Actif', default=True)
want_email= models.BooleanField('Email', default=True)
want_sms = models.BooleanField('SMS', default=True)
comment = models.CharField('Commentaire', max_length=128, blank=True)
class Meta:
ordering = ['pk','lastname','firstname']
def __str__(self):
return self.email
class ContactGroup(models.Model):
name = models.CharField('Name', max_length=128, blank=False)
description = models.TextField('Description', max_length=300, blank=True)
contacts = models.ManyToManyField(Contact, blank=True)
class Meta:
ordering = ['name']
def size(self):
return self.contacts.count()
def __str__(self):
return self.name
# -------------------------------------------------------------
# Site
# -------------------------------------------------------------
class Site(models.Model):
name = models.CharField('Label', max_length=32, blank=False)
description = models.CharField('Description', max_length=128, blank=False)
class Meta:
ordering = ['name']
def __str__(self):
return self.name + ' (' + self.description + ')'
# -------------------------------------------------------------
# Service
# -------------------------------------------------------------
class Service(models.Model):
name = models.CharField('Label', max_length=64, blank=False)
description = models.CharField('Description', max_length=128, blank=False)
class Meta:
ordering = ['name']
def __str__(self):
return self.name + ' (' + self.description + ')'
# -------------------------------------------------------------
# Info
# -------------------------------------------------------------
INFO_CATEGORY = (
(0, "N/A"),
(1, "Incident"),
(2, "Maintenance"),
(3, "Information"),
)
INFO_STATUS = (
(0, "N/A"),
(1, "Planifié"),
(2, "En cours"),
(3, "Terminé"),
)
INFO_PRIORITY = (
(0,"N/A"),
(1,"P1 - Critique"),
(2,"P2 - Haute"),
(3,"P3 - Moyenne"),
(4,"P4 - Faible"),
)
class Info(models.Model):
category = models.IntegerField('Catégorie', choices=INFO_CATEGORY, default=1)
title = models.CharField('Titre', max_length=120, blank=False)
status = models.IntegerField('Etat', choices=INFO_STATUS, default=2)
priority = models.IntegerField('Priorité', choices=INFO_PRIORITY, default=0)
start = models.DateTimeField('Début', blank=False, default=datetime.now,
help_text="Format : dd/mm/YYYY hh:mm:ss")
duration = models.IntegerField('Durée', default=0, blank=True, null=True,
help_text="Durée estimée/définitive en minutes.")
downtime = models.IntegerField('Indisponibilité', default=0, blank=True, null=True,
help_text="Indisponibilité estimée/définitive en minutes.")
detail = models.TextField('Description', max_length=4000, blank=True, null=True)
services = models.ManyToManyField(Service, blank=True)
sites = models.ManyToManyField(Site, blank=True)
# notifications
notify_groups = models.ManyToManyField(ContactGroup, blank=True)
send_email = models.BooleanField('Envoi de mail', default=True)
send_sms = models.BooleanField('Envoi de SMS', default=False)
# template
is_template = models.BooleanField('Modèle', default=False)
template_name = models.CharField('Nom de modele', max_length=120, blank=True)
# meta
author = models.CharField('Auteur', max_length=200, blank=False,
help_text="Saisir ici l'identité du déclarant de l'événement.")
visible = models.BooleanField('Visible', default=True)
# created_on = models.DateTimeField('Création', auto_now_add=True)
# updated_on = models.DateTimeField('Mise à jour', auto_now_add=True)
created_on = models.DateTimeField('Création',editable=False)
updated_on = models.DateTimeField('Mise à jour',editable=False)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created_on = timezone.now()
self.updated_on = timezone.now()
return super(Info, self).save(*args, **kwargs)
class Meta:
ordering = ['start', 'priority', 'category']
def __str__(self):
return self.title
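# -------------------------------------------------------------
# Forms
# -------------------------------------------------------------
# Illustrative sketch (not in the original file): a ModelForm for Info, which
# is presumably why ModelForm is imported above. The field list is an
# assumption.
class InfoForm(ModelForm):
    class Meta:
        model = Info
        fields = ['category', 'title', 'status', 'priority', 'start',
                  'duration', 'downtime', 'detail', 'services', 'sites',
                  'notify_groups', 'send_email', 'send_sms', 'author', 'visible']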
|
import pandas as pd
def get_dataset_dtypes(dataframe_list):
# dataframe_list
# List of pandas dataframe objects
###
# Student code (create additional functions as necessary)
###
# mock-up for demonstration - remove after development
# this is only a partial column list
# actual list will come from columns in each dataframe
relationship_dict = {'airlines': {},
'airports': {},
'flights': {},
'trip_logs': {}}
relationship_dict['airlines']['carrier'] = {'dtype':'O'}
relationship_dict['airports']['dest'] = {'dtype': 'O'}
relationship_dict['flights']['dest'] = {'dtype': 'O'}
relationship_dict['flights']['carrier'] = {'dtype': 'O'}
relationship_dict['flights']['flight_id'] = {'dtype': 'O'}
relationship_dict['trip_logs']['flight_id'] = {'dtype': 'O'}
# return relationship structure
return relationship_dict
def find_primary_key_candidates(dataframe_list, relationship_dict=None):
# dataframe_list
# List of pandas dataframe objects
#
# relationship_dict
# This is an existing relationship_dict. If None, a new
# relationship_dict should be created
    # create a new relationship_dict if one was not passed in,
    # and make sure every table has an entry
    if relationship_dict is None:
        relationship_dict = {}
    for table in dataframe_list:
        if table not in relationship_dict:
            relationship_dict[table] = {}
print('table: ', end='')
print(table)
for col in dataframe_list[table].columns:
if col not in relationship_dict[table]:
relationship_dict[table][col] = {}
print(' col: ', end='')
print(col, end=': ')
# primary key candidate must have unique values
total = dataframe_list[table][col].count()
unique = dataframe_list[table][col].nunique()
print(total, end=': ')
print(unique)
if total == unique:
relationship_dict[table][col]['key_candidate'] = True
print('found a primary key candidate')
else:
relationship_dict[table][col]['key_candidate'] = False
###
# Student code (create additional functions as necessary)
###
# mock-up for demonstration - remove after development
# relationship_dict['airlines']['carrier']['key_candidate'] = True
# relationship_dict['airports']['dest']['key_candidate'] = True
# relationship_dict['flights']['dest']['key_candidate'] = False
# relationship_dict['flights']['carrier']['key_candidate'] = False
# relationship_dict['flights']['flight_id']['key_candidate'] = True
# relationship_dict['trip_logs']['flight_id']['key_candidate'] = False
# return relationship structure
return relationship_dict
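# Illustrative sketch (not the assignment solution): one way the dtype
# collection in get_dataset_dtypes() could look. It assumes dataframe_list is
# actually a dict mapping table names to DataFrames, which is how
# find_primary_key_candidates() above treats it.
def _sketch_get_dataset_dtypes(dataframes):
    relationship_dict = {}
    for table, df in dataframes.items():
        relationship_dict[table] = {
            col: {'dtype': df[col].dtype.kind} for col in df.columns
        }
    return relationship_dict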
|
import os
import time
import torch
import torch.nn as nn
from torch.optim import SGD
from modules.binarizer import L1ColBinarizer, L1RowBinarizer, MagnitudeBinarizer
from modules.masked_nn import MaskedLinear
# # Returns mask of inputs matrix
# # mask = 0 in rows of inputs with smallest L1 norm
# def l1_percentage_mask(inputs:torch.Tensor, threshold: float):
# mask = inputs.clone() # use clone for gradient prop
# # calculate norms of each matrix
# L1_mean = torch.mean(inputs.abs(), dim=1)
# # sort
# _, idx = L1_mean.sort(descending=True)
# num_to_keep = int(threshold*L1_mean.numel())
# mask[idx[:num_to_keep],:] = 1.0 # largest num_to_keep rows are kept by writing one to their mask
# mask[idx[num_to_keep:],:] = 0.0
# return mask
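# Working restoration of the commented-out helper above, kept because
# runtime_analysis() further down still calls it. Rows of `inputs` with the
# smallest L1 mean are masked to zero; `threshold` is the fraction of rows to keep.
def l1_percentage_mask(inputs: torch.Tensor, threshold: float):
    mask = inputs.clone()  # clone keeps the mask in the autograd graph
    L1_mean = torch.mean(inputs.abs(), dim=1)       # L1 mean of each row
    _, idx = L1_mean.sort(descending=True)          # sort rows by decreasing norm
    num_to_keep = int(threshold * L1_mean.numel())
    mask[idx[:num_to_keep], :] = 1.0                # keep the largest rows
    mask[idx[num_to_keep:], :] = 0.0                # zero out the rest
    return mask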
class MLP(nn.Module):
def __init__(self,
in_features: int,
classes: int,
pruning_method: str = "row",
):
super(MLP, self).__init__()
dim1=in_features
dim2=int(in_features)
dim3=int(in_features/4)
self.lin1 = MaskedLinear(in_features=dim1, out_features=dim2, bias=True, mask_init="constant", mask_scale=0.0, pruning_method=pruning_method)
self.lin2 = MaskedLinear(in_features=dim2, out_features=dim3, bias=True, mask_init="constant", mask_scale=0.0, pruning_method=pruning_method)
self.lin3 = nn.Linear(in_features=dim3, out_features=classes, bias=True)
self.in_features = in_features
self.classes = classes
self.pruning_method = pruning_method
def forward(self, inputs, threshold):
x = self.lin1(inputs, threshold)
x = nn.functional.relu(x)
x = self.lin2(x, threshold)
x = nn.functional.relu(x)
        x = self.lin3(x)  # lin3 is a plain nn.Linear and takes no threshold
return x
class MLPNeuronMasked(nn.Module):
def __init__(self,
in_features: int,
classes: int,
):
super(MLPNeuronMasked, self).__init__()
dim1=in_features
dim2=int(in_features)
dim3=int(in_features/4)
self.lin1 = nn.Linear(in_features=dim1, out_features=dim2, bias=True)
self.lin1.mask_scores = torch.ones(dim2, dtype=torch.float, requires_grad=True) # scores to regularize which will choose neurons
self.lin1.mask = torch.ones(dim2, dtype=torch.float, requires_grad=False) # binary mask to disable neurons after pruning
self.lin2 = nn.Linear(in_features=dim2, out_features=dim3, bias=True)
self.lin2.mask_scores = torch.ones(dim3, dtype=torch.float, requires_grad=True)
        self.lin2.mask = torch.ones(dim3, dtype=torch.float, requires_grad=False)  # size matches lin2's output
self.lin3 = nn.Linear(in_features=dim3, out_features=classes, bias=True)
self.in_features = in_features
self.classes = classes
def forward(self, inputs):
x = self.lin1(inputs)
x = nn.functional.relu(x)
xmasked = x * self.lin1.mask_scores # Data type got changed to float64???
x = self.lin2(xmasked)
x = nn.functional.relu(x)
x = x * self.lin2.mask_scores #* self.mask2
x = self.lin3(x)
return x
# def prune_neurons(self, self. ):
# def self_test(self, threshold=0.6):
# optimizer = torch.optim.SGD(self.parameters(), lr=.1)
# dummy_data=torch.rand((self.in_features)).unsqueeze(0)
# dummy_target=torch.rand((self.classes)).unsqueeze(0)
# for thresh in [1.0,.8,.6,.4,.2, .1, .05]:
# print('Threshold = ', thresh)
# for i in range (5):
# optimizer.zero_grad()
# out = self.forward(dummy_data, threshold=thresh)
# # print(out.size())
# # print(out)
# loss=nn.functional.mse_loss(out, dummy_target)
# print(loss.item())
# loss.backward()
# optimizer.step()
# optimizer.zero_grad()
# out = self.forward(dummy_data, threshold=0.9)
# # print(out.size())
# print(out)
# loss=nn.functional.cross_entropy(out, torch.Tensor([1]).long())
# loss.backward()
# optimizer.step()
# test pruning with simple MLP and dummy data
# dim=10
# classes=10
# model = MLP(dim, classes, "topK")
# model.self_test(threshold=0.6)
# test with mnist
import torchvision
def mnist_eval(model, test_loader, threshold, device):
total=0; correct=0
for idx, batch in enumerate(test_loader):
batch_x = torch.flatten(batch[0], start_dim=1).to(device)
batch_y = batch[1].to(device)
out = model(batch_x, threshold)
pred_idx = torch.argmax(out, dim=-1)
correct += torch.sum(pred_idx == batch_y)
total+=batch_x.shape[0]
accuracy = correct*1.0/(total)
print('threshold: ', threshold, 'acc: ', accuracy)
def mnist_neuron_pruning(num_fine_tune_epochs, lambdas, model_dir, model_dir_out):
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device='cpu'
train_data = torchvision.datasets.MNIST('/home/ahoffman/research/transformers/examples/alex/struct-pruning/emmental', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_batch_size=32
test_data = torchvision.datasets.MNIST('/home/ahoffman/research/transformers/examples/alex/struct-pruning/emmental', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
test_loader = torch.utils.data.DataLoader(test_data, batch_size=test_batch_size, shuffle=True)
model = MLPNeuronMasked(28*28, 10).to(device)
# load pretrained model if possible to save training time
if os.path.exists(model_dir):
model.load_state_dict(torch.load(model_dir))
model = model.to(device)
print('loaded pretrained weights')
target_dims = {'lin1.mask_scores': 28*28-50}
optimizer = torch.optim.SGD(model.parameters(), lr=.01)
for reg_lambda in lambdas:
for epoch in range(num_fine_tune_epochs):
print('training epoch ', epoch)
cum_loss=0.0
            for idx, batch in enumerate(train_loader):  # iterate over the training set
batch_x = torch.flatten(batch[0], start_dim=1).to(device)
batch_y = batch[1].to(device)
optimizer.zero_grad()
out = model(batch_x)
reg = l1_reg_neuron(model)
loss = nn.functional.cross_entropy(out, batch_y) + reg_lambda * reg
loss.backward()
optimizer.step()
cum_loss+=loss
post_prefixes = {'lin1': 'lin2', 'lin2':'lin3'} # gotta be a better way than this....
prune_neuron_weights(model, target_dims, post_prefixes)
# save model
torch.save(model.state_dict(), model_dir_out)
def l1_reg_neuron(model):
reg=0.0
for name, param in model.named_parameters():
if "mask_scores" in name:
reg += torch.sum(param) # l1 norm is sum of weights
return reg
# should just feed a list of layer names accessible in model.state_dict() so i can more easily parse
def prune_neuron_weights(model, target_dims: dict, post_prefixes:dict):
'''prune model to match target dims\\
target dims is dict of neuron mask name, dimension\\
post prefixes is a dict of layer name, next layer name. The prunable layers surrounding neuron mask
'''
    # BUG: the mask scores are plain tensor attributes and were never registered
    # as parameters, so they do not show up in named_parameters(); they should be
    # stored somewhere accessible or registered as nn.Parameters.
    for name, param in model.named_parameters():
if "mask_scores" in name:
_, idx = param.sort()
num_to_keep = target_dims[name]
# get prev and next layers
prefix = name[:len(name)-12]
post_prefix = post_prefixes[prefix]
prev_weight = model.state_dict()[prefix+'.weight'] # mask will correspond to input layer
post_weight = model.state_dict()[post_prefix+'.weight']
# for name param in model.parameters
# if prefix of current name matches
# incoming_weights = model.named_parameters()[1]
            prev_weight = prev_weight[idx[:num_to_keep], :] # prune rows of the incoming weight matrix
# remove corresponding weights
def mnist_pruning_test(num_epochs, model_dir, pruning_method="row"):
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device='cpu'
train_data = torchvision.datasets.MNIST('/home/ahoffman/research/transformers/examples/alex/struct-pruning/emmental', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_batch_size=128
test_data = torchvision.datasets.MNIST('/home/ahoffman/research/transformers/examples/alex/struct-pruning/emmental', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
test_loader = torch.utils.data.DataLoader(test_data, batch_size=test_batch_size, shuffle=True)
model = MLP(28*28, 10, pruning_method).to(device)
# load pretrained model if possible to save training time
if os.path.exists(model_dir):
model.load_state_dict(torch.load(model_dir))
model = model.to(device)
else: # train
optimizer = torch.optim.SGD(model.parameters(), lr=.01)
threshold=1.0
for threshold in [0.6]: #,.8,.6,.4,.2]:
for epoch in range(num_epochs):
print('training epoch ', epoch)
cum_loss=0.0
                for idx, batch in enumerate(train_loader):  # iterate over the training set
batch_x = torch.flatten(batch[0], start_dim=1).to(device)
batch_y = batch[1].to(device)
optimizer.zero_grad()
out = model(batch_x, threshold)
loss = nn.functional.cross_entropy(out, batch_y)
cum_loss+=loss
loss.backward()
optimizer.step()
# save model
torch.save(model.state_dict(), model_dir)
# step 1: init pretrained model with neurons
# step 2: add regularization to loss function
# step 3: fine-tune, view neurons
# step 4: prune weights using neuron scores
# eval
# mnist_eval(model, test_loader, 1.0, device)
# model.lin1.pruning_method="row"
# model.lin2.pruning_method="row"
# model.lin3.pruning_method="row"
# model.lin3.bias.data = torch.zeros(model.lin3.bias.shape).to(device)
# mnist_eval(model, test_loader, 0.6, device)
# model.lin1.pruning_method="col"
# model.lin2.pruning_method="col"
# model.lin3.pruning_method="col"
# mnist_eval(model, test_loader, 0.6, device)
# making the final pruning method 'row' reduces performance a ton, but the outputs are not zeroed
# this is because it zeros one of the output neurons, making it impossible to learn features specific to that one. Fix this by actually pruning neurons. Not row after row or col after col
# def prune_neurons()
# def prune_neurons_using_rows(model, mag_threshold, kperc, method="row"):
# weights = (model.lin1.data, model.lin2.data)
# L1_dim=1
# if method=="row":
# L1_dim=0
# for weight in weights:
# L1_mean = torch.mean(weight.abs(), dim=L1_dim)
# _, idx = L1_mean.sort(descending=True)
# num_to_keep = int(threshold*L1_mean.numel())
# mask[idx[:num_to_keep],:] = 1.0 # largest num_to_keep rows are kept by writing one to their mask
# mask[idx[num_to_keep:],:] = 0.0
# return mask
if __name__=="__main__":
# mnist_pruning_test(5, os.path.join(os.path.dirname(os.path.abspath(__file__)),"mnist_mlp_model.pt"), pruning_method="row")
mnist_neuron_pruning(5, [1], os.path.join(os.path.dirname(os.path.abspath(__file__)),"mnist_mlp_model.pt"), os.path.join(os.path.dirname(os.path.abspath(__file__)),"mnist_mlp_model_pruned.pt"))
# threshold=0.6 # percent to keep
# dim=10
# dim2=dim+2
# weight = torch.rand((dim,dim2))
# weight.requires_grad=True
# x = torch.rand((dim))
# row_mask = L1RowBinarizer.apply(weight, threshold)
# col_mask = L1ColBinarizer.apply(weight, threshold)
# # print(row_mask)
# trg = torch.ones((dim2))
# optimizer = SGD([weight], lr=.1)
# for mask in [row_mask, col_mask]:
# optimizer.zero_grad()
# masked_weight = mask*weight
# out = torch.mv(masked_weight.transpose(0,1), x) # matrix vector mult
# loss = torch.nn.functional.l1_loss(out, trg)
# loss.backward()
# print(out)
# print(weight.grad)
# optimizer.zero_grad()
# # masked_weight = mask*weight
# out = torch.mv(weight.transpose(0,1), x) # matrix vector mult
# loss = torch.nn.functional.l1_loss(out, trg)
# loss.backward()
# print(out)
# print(weight.grad)
# print('x')
# print(x)
# for i in range(10):
# optimizer.zero_grad()
# masked_weight = row_mask*weight
# out = torch.mv(masked_weight.transpose(0,1), x) # matrix vector mult
# loss1 = torch.nn.functional.l1_loss(out, trg, reduction='sum')
# loss1.backward()
# optimizer.step()
# print(loss1)
# prune threshold
dims=[64,512,2048]
def runtime_analysis(dims):
for dim in dims:
threshold=0.5
inputs = torch.rand((dim,dim))
then=time.time()
mask = inputs.clone()
_, idx = inputs.abs().flatten().sort(descending=True)
j = int(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0
flat_out[idx[:j]] = 1
now=time.time()
print(dim,"elapsed:",now-then)
# fixed magnitude threshold, no sorting
then=time.time()
mask=inputs>threshold
now=time.time()
print(dim," no sort elapsed:",now-then)
# L1 mean sorting
then=time.time()
mask= l1_percentage_mask(inputs, threshold)
now=time.time()
print(dim," l1 elapsed:",now-then)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
inputs.to(device)
dims=[64,512,2048]
for dim in dims:
threshold=0.5
inputs = torch.rand((dim,dim))
then=time.time()
mask = inputs.clone()
_, idx = inputs.abs().flatten().sort(descending=True)
j = int(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0
flat_out[idx[:j]] = 1
now=time.time()
print(dim,"elapsed:",now-then)
# fixed magnitude threshold, no sorting
then=time.time()
mask=inputs>threshold
now=time.time()
print(dim," no sort elapsed:",now-then)
# L1 mean sorting
then=time.time()
mask= L1RowBinarizer.apply(inputs, threshold)
now=time.time()
print(dim," l1 elapsed:",now-then)
# conclusion: sorting the L1 means is faster than sorting every weight, but slower than applying simple
# magnitude threshold
|
########################################
# CS/CNS/EE 155 2018
# Problem Set 1
#
# Author: Andrew Kang
# Description: Set 1 SGD helper
########################################
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
# All functions operate under the assumption that:
# - We are dealing with SGD in the 2-dimensional case
# - The dataset is drawn from points in the domain [-1, 1] X [-1, 1]
####################
# DATASET FUNCTIONS
####################
def generate_dataset(N, f, noise):
# Generates an approximately linearly separable dataset:
# - X is drawn from [-1, 1] X [-1, 1].
# - Y is the dot product of X with some f plus some noise.
X = np.random.uniform(-1, 1, (N, 2))
Y = np.dot(f, X.T) + noise * np.random.rand(N)
return X, Y
def generate_dataset1():
# A specific instance of generate_dataset().
np.random.seed(155)
return generate_dataset(500, np.array([0.5, -0.1]).T, 0.1)
def generate_dataset2():
# A specific instance of generate_dataset().
np.random.seed(155)
return generate_dataset(500, np.array([-0.2, -0.3]).T, 0.1)
####################
# PLOTTING FUNCTIONS
####################
def plot_dataset(X, Y, show=True):
# Create a new figure and get its axes.
plt.close('all')
fig = plt.figure()
ax = fig.gca()
# Plot X and Y with the 'bwr' colormap centered at zero.
plt.set_cmap('bwr')
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolor='black', linewidth=0.5,
vmin=min(np.min(Y), -np.max(Y)), vmax=max(np.max(Y), -np.min(Y)))
plt.colorbar()
# Label the axes.
plt.xlabel('x_1')
plt.ylabel('x_2')
# Ugly code.
if show:
plt.show()
else:
return fig, ax
def get_loss_grid(x_params, y_params, X, Y, loss):
# Get 2D meshgrid.
dx = np.linspace(*x_params)
dy = np.linspace(*y_params)
w_grid = np.meshgrid(dx, dy)
# Evaluate loss on each point of the meshgrid.
loss_grid = np.zeros_like(w_grid[0])
for i in range(len(loss_grid)):
for j in range(len(loss_grid[0])):
w = np.array([w_grid[0][i, j], w_grid[1][i, j]])
loss_grid[i, j] = loss(X, Y, w)
return w_grid, loss_grid
def plot_loss_function(X_grid, Y_grid, loss_grid):
# Create a new figure and get its axes.
plt.close('all')
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the loss function in 3D.
surf = ax.plot_surface(X_grid, Y_grid, loss_grid, color='yellow', zorder=0)
return fig, ax, surf
####################
# SGD ANIMATION FUNCTIONS
####################
def multiSGD(SGD, X, Y, params, N_epochs):
# Arrays to store the results of SGD.
losses_lst = np.zeros((len(params), N_epochs))
W_lst = np.zeros((len(params), N_epochs, 2))
for i, param in enumerate(params):
print('Performing SGD with parameters', param, '...')
# Run SGD on the current set of parameters and store the results.
W, losses = SGD(X, Y, param['w_start'], param['eta'], N_epochs)
W_lst[i] = W
losses_lst[i] = losses
# some abysmal variable naming here... lol whoops
return W_lst, losses_lst
def animate_sgd_suite(SGD, loss, X, Y, params, N_epochs, step, ms=1):
delay = 5
# Run SGD on each set of parameters.
W_lst, losses_lst = multiSGD(SGD, X, Y, params, N_epochs)
# Get the loss grid and plot it.
w_grid, loss_grid = get_loss_grid((-1, 1, 100), (-1, 1, 100), X, Y, loss)
fig, ax, surf = plot_loss_function(w_grid[0], w_grid[1], loss_grid)
# Label the axes:
ax.set_xlabel('x_1')
ax.set_ylabel('x_2')
# Plot w_start values.
_, = ax.plot(W_lst[:, 0, 0], W_lst[:, 0, 1], losses_lst[:, 0], '+', mew=2, ms=10, c='black')
# Initialize graph to animate on.
graph, = ax.plot([], [], [], 'o', ms=ms, c='black')
graph.set_markeredgecolor('black')
graph.set_markeredgewidth(1)
# Define frame animation function.
def animate(i):
if i > delay:
i -= delay
graph.set_data(W_lst[:, :step*(i+1), 0].flatten(), W_lst[:, :step*(i+1), 1].flatten())
graph.set_3d_properties(losses_lst[:, :step*(i+1)].flatten())
surf.set_zorder(0)
graph.set_zorder(1)
return surf, graph
# Animate!
print('\nAnimating...')
anim = FuncAnimation(fig, animate, frames=int(N_epochs/step)+delay, interval=50)
return anim
def animate_convergence(X, Y, W, step):
delay = 5
# Plot w_start values.
fig, ax = plot_dataset(X, Y, show=False)
# Initialize graph to animate on.
graph, = ax.plot([], [])
# Define frame animation function.
def animate(i):
if i > delay:
i -= delay
w = W[i]
x_ax = np.linspace(-1, 1, 100)
graph.set_data(x_ax, - (w[0] / w[1]) * x_ax)
return graph
# Animate!
print('\nAnimating...')
anim = FuncAnimation(fig, animate, frames=int(len(W)/step)+delay, interval=50)
return anim
# Hey there! Hope you're having fun with the set :^)
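####################
# EXAMPLE SGD (sketch)
####################
# Illustrative sketch (not provided by this helper file): an SGD routine and a
# squared loss with the signatures multiSGD() and get_loss_grid() expect, i.e.
# SGD(X, Y, w_start, eta, N_epochs) -> (W, losses) with W of shape (N_epochs, 2).
# The squared loss is an assumption; the set may prescribe a different one.
def example_squared_loss(X, Y, w):
    return np.mean((np.dot(X, w) - Y) ** 2)

def example_SGD(X, Y, w_start, eta, N_epochs):
    w = np.array(w_start, dtype=float)
    W = np.zeros((N_epochs, 2))
    losses = np.zeros(N_epochs)
    for epoch in range(N_epochs):
        # One pass over the data in random order.
        for i in np.random.permutation(len(X)):
            grad = 2 * (np.dot(X[i], w) - Y[i]) * X[i]
            w = w - eta * grad
        W[epoch] = w
        losses[epoch] = example_squared_loss(X, Y, w)
    return W, losses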
|
"""Utilities for dealing with wordlists."""
import fnmatch
import os
from typing import List
def find_wordlist(wordlist_dirs: List[str], fnpattern: str) -> None:
"""Recursively search wordlist directories for a specified filename."""
for wordlist_dir in wordlist_dirs:
_walk_iter = os.walk(wordlist_dir, followlinks=True)
for dirpath, dirnames, filenames in _walk_iter:
for match in fnmatch.filter(filenames, fnpattern):
print(os.path.join(dirpath, match))
def walk_wordlists(wordlist_dirs: List[str]) -> None:
"""Recursively walk the wordlist directories and print all files."""
for wordlist_dir in wordlist_dirs:
_walk_iter = os.walk(wordlist_dir, followlinks=True)
for dirpath, dirnames, filenames in _walk_iter:
if not filenames:
continue
print(dirpath)
for filename in filenames:
print(filename)
print()
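if __name__ == "__main__":
    # Minimal usage sketch; the directory below is only an example, any list of
    # directories containing wordlist files will do.
    find_wordlist(["/usr/share/wordlists"], "*.txt")
    walk_wordlists(["/usr/share/wordlists"])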
|
"""Defines exceptions that can occur when interacting with job data"""
class InvalidConnection(Exception):
"""Exception indicating that the provided job connection was invalid
"""
pass
class InvalidData(Exception):
"""Exception indicating that the provided job data was invalid
"""
pass
class InvalidConfiguration(Exception):
"""Exception indicating that the provided job configuration was invalid
"""
pass
class StatusError(Exception):
"""Exception indicating that an operation cannot be completed due to the current job status.
"""
pass
|
#!/usr/bin/env python
# coding: utf-8
import json
from pyquery import PyQuery
query = PyQuery("https://nijisanji.ichikara.co.jp/member/")
member_urls = [element.get("href") for element in list(query("#liver_list a"))]
member_list = []
for member_url in member_urls:
query = PyQuery(member_url)
member_name_raw = query("title").text()
member_name = member_name_raw.split(" ")[0]
channel_url_raw = query(".elementor-social-icon-youtube")[0].get("href")
channel_url = channel_url_raw.split("?")[0]
member_list.append({"name": member_name, "url": channel_url})
print(
json.dumps(
sorted(member_list, key=lambda x: x["name"]), indent=2, ensure_ascii=False
)
)
|
import gym
import os
import re
import argparse
import pickle
import torch
import datetime
from train import PolicyGradientTrainer
from test import Tester
from paint import Painter
class Options(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--n_episodes', type=int, default=500, help='train episodes')
parser.add_argument('--emb_dim', type=list, default=[20, 20, 20, 20], help='dim of embedding layers')
parser.add_argument('--gamma', type=float, default=0.95, help='decline factor for step reward')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--step_size', type=int, default=100, help='step size in lr_scheduler for optimizer')
self.parser = parser
def parse(self):
arg = self.parser.parse_args(args=[])
return arg
if __name__ == '__main__':
current_time = re.sub(r'\D', '', str(datetime.datetime.now())[0:-7])
if not os.path.exists('./checkpoints/' + current_time):
os.makedirs('./checkpoints/' + current_time)
record_path = './checkpoints/' + current_time + '/data.pkl'
n_repeat = 10
args = Options().parse()
args.env = gym.make('CartPole-v1')
args.model_path = './checkpoints/' + current_time + '/model.pth.tar'
'''
env_name: CartPole-v1
    states : (position x, x acceleration, deflection angle theta, angular acceleration)
    actions : (move left 0, move right 1)
'''
best_reward = 0.0
rewards = []
store_flag = True
for i in range(n_repeat):
print(f'========== Repeated Experiment # {i:02d} ===========')
trainer = PolicyGradientTrainer(args)
reward = trainer.train()
rewards.append(reward)
d = {'episode': range(args.n_episodes), 'reward': rewards}
with open(record_path, 'wb') as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
estimate_reward = torch.median(torch.tensor(rewards[-10:]))
if best_reward < estimate_reward:
# store the first trained agent in the repeated experiments
state = {'state_dict': trainer.model.state_dict(), 'estimate_reward': estimate_reward}
torch.save(state, args.model_path)
best_reward = estimate_reward
painter = Painter(record_path)
painter.paint()
tester = Tester(args)
tester.test()
|
#!/usr/bin/python3
# Halide tutorial lesson 6.
# This lesson demonstrates how to evaluate a Func over a domain that
# does not start at (0, 0).
# This lesson can be built by invoking the command:
# make tutorial_lesson_06_realizing_over_shifted_domains
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_06*.cpp -g -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_06 -std=c++11
# LD_LIBRARY_PATH=../bin ./lesson_06
# On os x:
# g++ lesson_06*.cpp -g -I ../include -L ../bin -lHalide -o lesson_06 -std=c++11
# DYLD_LIBRARY_PATH=../bin ./lesson_06
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
def main():
# The last lesson was quite involved, and scheduling complex
# multi-stage pipelines is ahead of us. As an interlude, let's
# consider something easy: evaluating funcs over rectangular
# domains that do not start at the origin.
# We define our familiar gradient function.
    gradient = Func("gradient")
x, y = Var("x"), Var("y")
gradient[x, y] = x + y
# And turn on tracing so we can see how it is being evaluated.
gradient.trace_stores()
# Previously we've realized gradient like so:
#
# gradient.realize(8, 8)
#
    # This does four things internally:
# 1) Generates code than can evaluate gradient over an arbitrary
# rectangle.
# 2) Allocates a new 8 x 8 image.
# 3) Runs the generated code to evaluate gradient for all x, y
# from (0, 0) to (7, 7) and puts the result into the image.
# 4) Returns the new image as the result of the realize call.
# What if we're managing memory carefully and don't want Halide
# to allocate a new image for us? We can call realize another
# way. We can pass it an image we would like it to fill in. The
# following evaluates our Func into an existing image:
print("Evaluating gradient from (0, 0) to (7, 7)")
result = Image(Int(32), 8, 8)
gradient.realize(result)
# Let's check it did what we expect:
for yy in range(8):
for xx in range(8):
if result(xx, yy) != xx + yy:
print("Something went wrong!")
return -1
# Now let's evaluate gradient over a 5 x 7 rectangle that starts
# somewhere else -- at position (100, 50). So x and y will run
# from (100, 50) to (104, 56) inclusive.
# We start by creating an image that represents that rectangle:
shifted = Image(Int(32), 5, 7) # In the constructor we tell it the size.
shifted.set_min(100, 50) # Then we tell it the top-left corner.
print("Evaluating gradient from (100, 50) to (104, 56)")
# Note that this won't need to compile any new code, because when
# we realized it the first time, we generated code capable of
# evaluating gradient over an arbitrary rectangle.
gradient.realize(shifted)
    # As in the C++ version, we also access the image object using coordinates
    # that start at (100, 50).
for yy in range(50, 57):
for xx in range(100, 105):
if shifted(xx, yy) != xx + yy:
print("Something went wrong!")
return -1
# The image 'shifted' stores the value of our Func over a domain
# that starts at (100, 50), so asking for shifted(0, 0) would in
# fact read out-of-bounds and probably crash.
# What if we want to evaluate our Func over some region that
# isn't rectangular? Too bad. Halide only does rectangles :)
print("Success!")
return 0
if __name__ == "__main__":
main()
|