repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
CyberDani/personal-roadmap | webPage/generator/modules/uTest.py | <filename>webPage/generator/modules/uTest.py
import sys
import unittest
from defTypes import appDecisionType
from modules import checks
def runAndEvaluateUnitTests(relativeDirPath, filePattern, outputStream = None):
  """Runs the unit tests matching filePattern under relativeDirPath and maps
  the outcome to an app decision plus printable report lines.

  Returns a (AppDecisionType, list-of-strings) tuple: CONTINUE_RUNNING when
  every test passed, STOP_APP otherwise."""
  reportLines = []
  unitTestsResult = collectAndRunUnitTestsByFilePattern(relativeDirPath, filePattern, outputStream)
  if unitTestsResult.wasSuccessful():
    reportLines.append(' - ALL UNIT TESTS PASSED -\n')
    return appDecisionType.AppDecisionType.CONTINUE_RUNNING, reportLines
  # at least one test failed: the caller must not continue with any operation
  reportLines.append('\n ======= UNIT TEST FAILED ======= ')
  reportLines.append('\n [!] No operation can be done until all tests pass!')
  return appDecisionType.AppDecisionType.STOP_APP, reportLines
def collectAndRunUnitTestsByFilePattern(relativeDirPath, filePattern, outputStream = None):
  """Discovers every test file matching filePattern under relativeDirPath,
  runs them all and returns the unittest result object.

  Raises when the arguments are invalid or when no test was discovered
  (an empty discovery is almost always a misconfigured pattern)."""
  checks.checkIfString(relativeDirPath, 2, 300)
  checks.checkIfString(filePattern, 1, 300)
  testLoader = unittest.TestLoader()
  testSuites = unittest.TestSuite()
  testSuites.addTest(testLoader.discover(relativeDirPath, pattern = filePattern))
  testRunner = unittest.TextTestRunner(stream = outputStream, verbosity = 0)
  result = testRunner.run(testSuites)
  if result.testsRun == 0:
    raise Exception('No tests found to run!')
  return result
|
CyberDani/personal-roadmap | webPage/generator/modules/webLibs.py | from modules import htmlBuilder
from modules import checks
def addFontAwesome_v611(htmlFile, indentDepth):
  # Writes the Font Awesome 6.1.1 CSS and JS CDN includes into the html file,
  # with crossorigin="anonymous" and referrerpolicy="no-referrer".
  # NOTE(review): the SRI integrity hash arguments were redacted to "<KEY in
  # this copy of the source — restore the real hashes before use.
  htmlBuilder.addCssLinkHrefToHtmlOutputFile(htmlFile, indentDepth,
    "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/css/all.min.css",
    "<KEY>
    "anonymous", "no-referrer")
  htmlBuilder.addJsScriptSrcToHtmlOutputFile(htmlFile, indentDepth,
    "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/js/all.min.js",
    "<KEY>
    "anonymous", "no-referrer")
def addJquery_v360(htmlFile, indentDepth):
  # Writes the jQuery 3.6.0 CDN <script> include into the html file.
  # NOTE(review): the SRI integrity hash was redacted to "<KEY in this copy of
  # the source — restore the real hash before use.
  htmlBuilder.addJsScriptSrcToHtmlOutputFile(htmlFile, indentDepth,
    "https://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min.js",
    "<KEY>
    "anonymous", "no-referrer")
def addMaterialize_v110_alpha(htmlFile, indentDepth):
  """Writes the Materialize 1.1.0-alpha CSS and JS includes (jsDelivr CDN)."""
  cssUrl = "https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/css/materialize.min.css"
  jsUrl = "https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/js/materialize.min.js"
  htmlBuilder.addCssLinkHrefToHtmlOutputFile(htmlFile, indentDepth, cssUrl)
  htmlBuilder.addJsScriptSrcToHtmlOutputFile(htmlFile, indentDepth, jsUrl)
def addGoogleIcons(htmlFile, indentDepth):
  """Writes the Google Material Icons stylesheet include."""
  iconsUrl = "https://fonts.googleapis.com/icon?family=Material+Icons"
  htmlBuilder.addCssLinkHrefToHtmlOutputFile(htmlFile, indentDepth, iconsUrl)
def addJQueryLoadingOverlay_v217(htmlFile, indentDepth):
  """Writes the jQuery LoadingOverlay 2.1.7 <script> include (jsDelivr CDN)."""
  overlayUrl = "https://cdn.jsdelivr.net/npm/gasparesganga-jquery-loading-overlay@2.1.7/dist/loadingoverlay.min.js"
  htmlBuilder.addJsScriptSrcToHtmlOutputFile(htmlFile, indentDepth, overlayUrl)
def addGoogleFont(htmlFile, indentDepth, name):
  """Writes the preconnect hints plus the stylesheet link for a Google font.

  *name* is appended verbatim after ".../css2" — presumably a "?family=..."
  query string; callers are responsible for its exact format."""
  checks.checkIfString(name, 3, 300)
  tabs = htmlBuilder.getEscapedTabs(indentDepth)
  htmlFile.write(tabs + '<link rel="preconnect" href="https://fonts.googleapis.com">\n')
  htmlFile.write(tabs + '<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>\n')
  htmlFile.write(tabs + '<link href="https://fonts.googleapis.com/css2' + name + '" rel="stylesheet">\n')
CyberDani/personal-roadmap | webPage/generator/unitTests/appDecisionType_test.py | import unittest
import sys
sys.path.append('..')
from defTypes import appDecisionType
class AppDecisionTypeTests(unittest.TestCase):
  # Guards the AppDecisionType enum against accidental member changes.
  def test_values(self):
    # Bare attribute access: raises AttributeError if a member is renamed or removed.
    appDecisionType.AppDecisionType.STOP_APP
    appDecisionType.AppDecisionType.CONTINUE_RUNNING
  def test_validateLength(self):
    # The app only knows these two decisions; fail if a new one silently appears.
    self.assertEqual(len(appDecisionType.AppDecisionType), 2)
|
CyberDani/personal-roadmap | webPage/generator/modules/htmlHead.py | <gh_stars>0
from modules import checks
from modules import htmlBuilder
from modules import webLibs
class HtmlHead:
  """Fluent builder for the <head> section of the generated html page.

  Every element/library can be added at most once — a second attempt raises.
  All writer methods return self so calls can be chained.
  """

  def __init__(self, htmlFile, indentDepth):
    checks.checkIntIsBetween(indentDepth, 1, 30)
    checks.checkIfFile(htmlFile)
    self.htmlFile = htmlFile
    self.indentDepth = indentDepth
    # one guard flag per unique head element / library
    self.titleSet = False
    self.faviconSet = False
    self.metaScreenOptimizedForMobile = False
    self.fontAwesomeLibAdded = False
    self.jQueryLibAdded = False
    self.googleIconsLibAdded = False
    self.materializeLibAdded = False
    self.googleFontLibAdded = False
    self.jQueryLoadingOverlayLibAdded = False

  def setTitle(self, title):
    """Writes the <title> tag; raises when a title was already set."""
    if self.titleSet:
      raise Exception(f"A title is already set, will not add '{title}'.")
    self.titleSet = True
    htmlBuilder.addTitleToHtmlOutputFile(self.htmlFile, title, self.indentDepth)
    return self

  def setFavicon(self, favIconPath):
    """Writes the favicon link; raises when a favicon was already set."""
    if self.faviconSet:
      raise Exception(f"A favicon is already set, will not add '{favIconPath}'.")
    self.faviconSet = True
    htmlBuilder.addFaviconToHtmlOutputFile(self.htmlFile, favIconPath, self.indentDepth)
    return self

  def setMetaScreenOptimizedForMobile(self):
    """Writes the mobile viewport meta tag; raises on a second call."""
    if self.metaScreenOptimizedForMobile:
      raise Exception("A meta tag for optimizing screen for mobile had already been added")
    self.metaScreenOptimizedForMobile = True
    htmlBuilder.addMetaScreenOptimizedForMobileToHtmlOutputFile(self.htmlFile, self.indentDepth)
    return self

  def includeFileAsInlineCSS(self, filePath):
    """Inlines the given file's content wrapped in a <style> tag (repeatable)."""
    htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(self.htmlFile, filePath,
                                                               "style", "", self.indentDepth)
    return self

  def addFontAwesome_v611(self):
    """Adds the Font Awesome 6.1.1 includes; raises on a second call."""
    if self.fontAwesomeLibAdded:
      raise Exception("Fontawesome library had already been added")
    self.fontAwesomeLibAdded = True
    webLibs.addFontAwesome_v611(self.htmlFile, self.indentDepth)
    return self

  def addJquery_v360(self):
    """Adds the jQuery 3.6.0 include; raises on a second call."""
    if self.jQueryLibAdded:
      raise Exception("jQuery library had already been added")
    self.jQueryLibAdded = True
    webLibs.addJquery_v360(self.htmlFile, self.indentDepth)
    return self

  def addGoogleIcons(self):
    """Adds the Google Material Icons include; raises on a second call."""
    if self.googleIconsLibAdded:
      raise Exception("Google Icons library had already been added")
    self.googleIconsLibAdded = True
    webLibs.addGoogleIcons(self.htmlFile, self.indentDepth)
    return self

  def addMaterialize_v110_alpha(self):
    """Adds the Materialize 1.1.0-alpha includes; raises on a second call."""
    if self.materializeLibAdded:
      raise Exception("Materialize library had already been added")
    self.materializeLibAdded = True
    webLibs.addMaterialize_v110_alpha(self.htmlFile, self.indentDepth)
    return self

  def addGoogleFont(self, fontString):
    """Adds a Google font include; raises on a second call."""
    if self.googleFontLibAdded:
      raise Exception("Google font library had already been added")
    self.googleFontLibAdded = True
    webLibs.addGoogleFont(self.htmlFile, self.indentDepth, fontString)
    return self

  def addJQueryLoadingOverlay_v217(self):
    """Adds the jQuery LoadingOverlay 2.1.7 include; raises on a second call."""
    if self.jQueryLoadingOverlayLibAdded:
      raise Exception("jQuery Loading Overlay library had already been added")
    self.jQueryLoadingOverlayLibAdded = True
    webLibs.addJQueryLoadingOverlay_v217(self.htmlFile, self.indentDepth)
    return self
|
CyberDani/personal-roadmap | webPage/generator/unitTests/stringUtil_test.py | import sys
import unittest
sys.path.append('..')
from modules import stringUtil
class StringUtilTests(unittest.TestCase):
  """Tests for stringUtil.getStringStartsWithEndsWithNoOverlap and rTrimNewLines."""

  def test_getStringStartsWithEndsWith_nonSense(self):
    # wrong types and empty-string arguments must all raise
    invalidArgLists = [(None, None, None),
                       ("", "Mozart", "Bach"),
                       ("X", "Mozart", "Bach"),
                       ("", "", ""),
                       ("", "", "endStr"),
                       ("", "beginStr", ""),
                       ("Small test string", "", "test"),
                       ("Small test string", "", "asd"),
                       ("Small test string", "Small", ""),
                       ("Small test string", "asd", ""),
                       ("Small test string", "", "")]
    for args in invalidArgLists:
      with self.assertRaises(Exception):
        stringUtil.getStringStartsWithEndsWithNoOverlap(*args)

  def test_getStringStartsWithEndsWith_stringNotFound(self):
    # when begin/end markers do not both occur (without overlap) the answer is empty
    notFoundArgLists = [("test string", "test", "end"),
                        ("test string", "begin", "string"),
                        ("test string", "begin", "end"),
                        ("test string", "x", "y"),
                        ("test string", "tesu", "string"),
                        ("test string", "tests", "string"),
                        ("test string", "test", "strings"),
                        ("test string", "test", "strinh"),
                        ("abcabcQWE", "ca", "ab")]
    for args in notFoundArgLists:
      ans = stringUtil.getStringStartsWithEndsWithNoOverlap(*args)
      self.assertEqual(len(ans), 0)

  def test_getStringStartsWithEndsWith_stringFound(self):
    examples = [(("test string", "test", "string"), "test string"),
                (("test string", "test", "g"), "test string"),
                (("test string", "t", "g"), "test string"),
                (("test string", "te", "st"), "test"),
                (("test string", "st", "st"), "st st"),
                (("test string", "s", "s"), "st s"),
                (("test string", "s", "t"), "st"),
                (("abcabcQWEabcabc", "ca", "ab"), "cabcQWEab"),
                (("abcabcQWEabcabc", "bca", "bca"), "bcabcQWEabca"),
                (("abcabcQWEabcabc", "bca", "ca"), "bcabcQWEabca"),
                (("abcabcQWAEabcabc", "bca", "a"), "bcabcQWAEa"),
                (("abcabcQWAEabcabc", "abc", "abc"), "abcabc")]
    for args, expected in examples:
      self.assertEqual(stringUtil.getStringStartsWithEndsWithNoOverlap(*args), expected)

  def test_rTrimNewLines_nonSense(self):
    with self.assertRaises(Exception):
      stringUtil.rTrimNewLines()
    for invalidArg in [["hello"], True, None]:
      with self.assertRaises(Exception):
        stringUtil.rTrimNewLines(invalidArg)

  def test_rTrimNewLines_examples(self):
    # only trailing \n / \r characters are stripped, leading ones are kept
    examples = [("", ""),
                ("X", "X"),
                ("heLLo", "heLLo"),
                ("\n", ""),
                ("\r\n", ""),
                ("\n\n", ""),
                ("\r\n\r\n", ""),
                ("\r\r\r", ""),
                ("hey!\n", "hey!"),
                ("\r\nwinLine\r\n", "\r\nwinLine"),
                ("winLine\r\n", "winLine"),
                ("firstRow\r\nsecondRow\r\n", "firstRow\r\nsecondRow"),
                ("\nfirstRow\n\r\nsecondRow\r\n", "\nfirstRow\n\r\nsecondRow"),
                ("\nfirstRow\n\r\nsecondRow" + "\r\n" * 11, "\nfirstRow\n\r\nsecondRow"),
                ("\n\nfirstRow\n\r\n\nsecondRow" + "\r\n" * 11, "\n\nfirstRow\n\r\n\nsecondRow")]
    for inputString, expected in examples:
      self.assertEqual(stringUtil.rTrimNewLines(inputString), expected)
CyberDani/personal-roadmap | webPage/generator/defTypes/buildSettings.py | import io
from dataclasses import dataclass
from defTypes import buildType
from defTypes import dbBranchType
from modules import counter
@dataclass
class BuildSettings:
  """Validated bundle of everything the page builder needs for one run."""
  buildOption: buildType.BuildType
  dbBranch: dbBranchType.DbBranchType
  stepsCounter: counter.SimpleCounter
  htmlOutputFile: io.TextIOBase
  indentDepth: int = 1

  def __post_init__(self):
    # Validate every member's type; the check order (dbBranch, buildOption,
    # htmlOutputFile, indentDepth, stepsCounter) decides which error wins.
    typeRequirements = ((self.dbBranch, dbBranchType.DbBranchType, "Type dbBranchType.DbBranchType mismatch!"),
                        (self.buildOption, buildType.BuildType, "Type buildType.BuildType mismatch!"),
                        (self.htmlOutputFile, io.TextIOBase, "Type io.TextIOBase mismatch!"),
                        (self.indentDepth, int, "Type int mismatch!"),
                        (self.stepsCounter, counter.SimpleCounter, "Type counter.SimpleCounter mismatch!"))
    for value, requiredType, errorMessage in typeRequirements:
      if not isinstance(value, requiredType):
        raise Exception(errorMessage)
|
CyberDani/personal-roadmap | webPage/generator/modules/checks.py | import io
import json
def checkIfValidJsonFile(file):
  """Parses *file* as JSON and returns the parsed value.

  Raises when *file* is not a text-file object or its content is not valid JSON."""
  checkIfFile(file)
  try:
    parsedJson = json.load(file)
  except ValueError as e:
    raise Exception('Invalid json: {0}'.format(e))
  return parsedJson
def checkIfValidJsonFileByFilePath(filePath):
  """Opens the file at *filePath*, validates it as JSON and returns the parsed value.

  Fix: the file handle was previously opened and never closed (leaked on both
  the success and the error path); a context manager now guarantees closing.
  """
  checkIfString(filePath, 2, 300)
  with open(filePath, "r") as f:
    return checkIfValidJsonFile(f)
def checkIfStringIsAlphaNumerical(string):
  """Raises when *string* is not a str or is not purely alphanumeric.

  Note: an empty string is rejected too (str.isalnum() is False for "")."""
  if type(string) != str:
    raise Exception("Not a string type: '{0}'".format(str(string)))
  if not string.isalnum():
    raise Exception("String {} is not alphanumerical!".format(string))
def checkIfAllNoneOrString(listVar, minStringLength, maxStringLength):
  """Validates a non-empty list whose elements are either all None, or all
  strings with lengths within [minStringLength, maxStringLength]."""
  checkIfList(listVar)
  if not listVar:
    raise Exception("List must not be empty")
  # a list consisting purely of None values is explicitly allowed
  if all(element is None for element in listVar):
    return
  for element in listVar:
    checkIfString(element, minStringLength, maxStringLength)
def checkIfString(var, minLength, maxLength):
  """Validates that *var* is a str whose length lies in [minLength, maxLength].

  The bound arguments themselves are validated first (ints, non-negative,
  consistent); any violation raises Exception."""
  if type(var) != str:
    raise Exception("Not a string type: '{0}'".format(str(var)))
  if type(minLength) != int:
    raise Exception("minLength not an int type")
  if type(maxLength) != int:
    raise Exception("maxLength not an int type")
  if minLength < 0:
    raise Exception("minLength cannot be a negative number")
  if maxLength < minLength:
    raise Exception("maxLength [{0}] < minLength [{1}]".format(maxLength, minLength))
  stringLength = len(var)
  if stringLength < minLength:
    raise Exception("String is too short")
  if stringLength > maxLength:
    raise Exception("String is too long")
def checkIfPureListOfStrings(var):
  """Raises when *var* is not a list or contains any non-str element."""
  checkIfList(var)
  for element in var:
    if type(element) != str:
      raise Exception("The list has a non-string element: '{0}'".format(str(element)))
def checkIfList(var):
  """Raises when *var* is not exactly a list (tuples etc. are rejected too)."""
  if type(var) != list:
    raise Exception("Not a list type")
def checkIfFile(file):
  """Raises when *file* is not a text-mode file object (io.TextIOBase)."""
  if not isinstance(file, io.TextIOBase):
    raise Exception("The file is not a TextIOWrapper type argument")
def checkIntIsBetween(var, minValue, maxValue):
  """Validates that *var* is an int within [minValue, maxValue] (inclusive).

  The bounds are validated first; any violation raises Exception.
  Note: type(x) != int deliberately rejects bools as well.
  """
  if type(minValue) != int:
    raise Exception("minValue not an int type")
  if type(maxValue) != int:
    raise Exception("maxValue not an int type")
  if type(var) != int:
    raise Exception("Not an int type for argument " + str(var))
  if maxValue < minValue:
    raise Exception("max [{0}] < min[{1}]".format(maxValue, minValue))
  if var < minValue:
    # bug fix: '"int < " + minValue' concatenated str + int and raised
    # TypeError instead of the intended message — format the numbers instead
    raise Exception("int < {0} for argument {1}".format(minValue, var))
  if var > maxValue:
    raise Exception("Do you really need that int to be {0}?".format(var))
|
CyberDani/personal-roadmap | webPage/generator/unitTests/learningTests_test.py | import unittest
class UnitTestLearningTests(unittest.TestCase):
  """Playground tests exploring unittest framework features (skip,
  expectedFailure, subTest, assertRaises)."""

  def setUp(self):
    # runs before every single test method
    self.a = 2

  def tearDown(self):
    # runs after every single test method
    self.a = 22

  @unittest.skip("demonstrating skipping")
  def test_nothing(self):
    self.fail("shouldn't happen")

  @unittest.expectedFailure
  def test_fail(self):
    self.assertEqual(1, 0, "broken")

  def test_skipIfCondition(self):
    """
    Test that a is 2 as set in setup()
    """
    self.assertLess(self.a, 20, "something is wrong")
    if self.a == 22:
      self.skipTest("external resource not available")

  def test_isEven(self):
    with self.subTest("a = {0}".format(self.a)):
      self.assertEqual(self.a % 2, 0)

  def test_split(self):
    words = 'hello world'
    self.assertEqual(words.split(), ['hello', 'world'])
    # check that split fails when the separator is not a string
    with self.assertRaises(TypeError):
      words.split(2)
CyberDani/personal-roadmap | webPage/generator/modules/filerw.py | import os
from modules import checks
from modules import stringUtil
###### Reads ######
def fileExists(filePath):
  """Returns True when *filePath* points to an existing regular file."""
  checks.checkIfString(filePath, 2, 300)
  return os.path.isfile(filePath)
def getLinesByFilePathWithEndingNewLine(filePath):
  """Returns the file's lines, each keeping its trailing newline.

  Fix: the file handle was previously opened and never closed (leaked);
  a context manager now guarantees closing.
  """
  checks.checkIfString(filePath, 2, 300)
  with open(filePath, "r") as f:
    return f.readlines()
def getLinesWithEndingNewLine(file):
  """Returns all lines of an already-open file, each keeping its trailing newline."""
  checks.checkIfFile(file)
  return file.readlines()
def getLinesByFilePath(filePath):
  """Returns the file's lines with trailing newline characters stripped."""
  return rTrimNewLines(getLinesByFilePathWithEndingNewLine(filePath))
def getLines(file):
  """Returns all lines of an already-open file with trailing newlines stripped."""
  return rTrimNewLines(getLinesWithEndingNewLine(file))
###### Writes ######
def writeLinesToFile(file, lines):
  """Writes the strings joined by newlines — no newline after the last one."""
  checks.checkIfFile(file)
  checks.checkIfPureListOfStrings(lines)
  file.write("\n".join(lines))
def writeLinesToFileByFilePath(filePath, lines):
  """Overwrites the file at *filePath* with the lines joined by newlines.

  Fix: the file is now opened via a context manager so it is closed even when
  writing raises (the previous explicit close() was skipped on exceptions).
  """
  checks.checkIfString(filePath, 2, 300)
  with open(filePath, "w") as file:
    writeLinesToFile(file, lines)
def writeLinesToFileThenAppendNewLine(file, lines):
  """Writes every string followed by a newline (so the output ends with one)."""
  checks.checkIfFile(file)
  checks.checkIfPureListOfStrings(lines)
  file.writelines(line + "\n" for line in lines)
def writeLinesToFileByFilePathThenAppendNewLine(filePath, lines):
  """Overwrites the file at *filePath*, writing every line plus a newline.

  Fix: the file is now opened via a context manager so it is closed even when
  writing raises (the previous explicit close() was skipped on exceptions).
  """
  checks.checkIfString(filePath, 2, 300)
  with open(filePath, "w") as file:
    writeLinesToFileThenAppendNewLine(file, lines)
def writeStringsPrefixedToFileThenAppendNewLine(file, prefix, lines):
  """Writes every non-blank string prefixed (no newline added per string);
  blank entries become bare newlines. A single closing newline ends the output."""
  checks.checkIfFile(file)
  checks.checkIfPureListOfStrings(lines)
  checks.checkIfString(prefix, 0, 300)
  for line in lines:
    lineIsBlank = not line or line in ("\n", "\r\n")
    file.write("\n" if lineIsBlank else prefix + line)
  file.write("\n")
def writeLinesPrefixedToFileThenAppendNewLine(file, prefix, lines):
  """Writes every non-blank line as prefix + line + newline; blank entries
  become bare newlines. A single closing newline ends the output."""
  checks.checkIfFile(file)
  checks.checkIfPureListOfStrings(lines)
  checks.checkIfString(prefix, 0, 300)
  for line in lines:
    lineIsBlank = not line or line in ("\n", "\r\n")
    file.write("\n" if lineIsBlank else prefix + line + "\n")
  file.write("\n")
def writeLinesPrefixedToFile(file, prefix, lines):
  """Writes every non-blank line as prefix + line + newline; blank entries
  become bare newlines. No extra closing newline is appended."""
  checks.checkIfFile(file)
  checks.checkIfPureListOfStrings(lines)
  checks.checkIfString(prefix, 0, 300)
  for line in lines:
    lineIsBlank = not line or line in ("\n", "\r\n")
    file.write("\n" if lineIsBlank else prefix + line + "\n")
###### Helper functions ######
def rTrimNewLines(stringsArr):
  """Returns a new list where each string's trailing newlines are stripped."""
  checks.checkIfPureListOfStrings(stringsArr)
  return [stringUtil.rTrimNewLines(string) for string in stringsArr]
CyberDani/personal-roadmap | webPage/generator/unitTests/dbBranchType_test.py | <gh_stars>0
import unittest
import sys
sys.path.append('..')
from defTypes import dbBranchType
class DbBranchTypeTests(unittest.TestCase):
  # Guards the DbBranchType enum against accidental member changes.
  def test_values(self):
    # Bare attribute access: raises AttributeError if a member is renamed or removed.
    dbBranchType.DbBranchType.MASTER
    dbBranchType.DbBranchType.DEVEL
  def test_validateLength(self):
    # Exactly two branches are supported; fail if a new one silently appears.
    self.assertEqual(len(dbBranchType.DbBranchType), 2)
CyberDani/personal-roadmap | webPage/generator/unitTests/buildSettings_test.py | <reponame>CyberDani/personal-roadmap<filename>webPage/generator/unitTests/buildSettings_test.py
import unittest
import sys
sys.path.append('..')
from defTypes import buildSettings
from defTypes import buildType
from defTypes import dbBranchType
from modules import counter
class BuildSettingsTests(unittest.TestCase):
  """Tests for the BuildSettings dataclass and its type validation."""

  def test_validateDataMembers_oneExample(self):
    outputFile = open("./unitTests/temp/test.txt", "w")
    stepsCounter = counter.SimpleCounter(1)
    settings = buildSettings.BuildSettings(dbBranch=dbBranchType.DbBranchType.DEVEL,
                                           htmlOutputFile=outputFile,
                                           buildOption=buildType.BuildType.REBUILD,
                                           indentDepth=2,
                                           stepsCounter=stepsCounter)
    self.assertEqual(settings.buildOption, buildType.BuildType.REBUILD)
    self.assertEqual(settings.dbBranch, dbBranchType.DbBranchType.DEVEL)
    self.assertEqual(settings.indentDepth, 2)
    self.assertTrue(settings.htmlOutputFile.writable())
    self.assertEqual(settings.stepsCounter.getNextInt(), 1)

  def test_validateDataMembers_anotherExample(self):
    outputFile = open("./unitTests/temp/test.txt", "w")
    stepsCounter = counter.SimpleCounter(11)
    settings = buildSettings.BuildSettings(dbBranch=dbBranchType.DbBranchType.MASTER,
                                           htmlOutputFile=outputFile,
                                           buildOption=buildType.BuildType.DO_NOT_BUILD,
                                           indentDepth=12,
                                           stepsCounter=stepsCounter)
    self.assertEqual(settings.buildOption, buildType.BuildType.DO_NOT_BUILD)
    self.assertEqual(settings.dbBranch, dbBranchType.DbBranchType.MASTER)
    self.assertEqual(settings.indentDepth, 12)
    self.assertTrue(settings.htmlOutputFile.writable())
    self.assertEqual(settings.stepsCounter.getNextInt(), 11)

  def test_setDataMembers_nonSense(self):
    outputFile = open("./unitTests/temp/test.txt", "w")
    stepsCounter = counter.SimpleCounter(11)
    validKwargs = dict(dbBranch=dbBranchType.DbBranchType.MASTER,
                       htmlOutputFile=outputFile,
                       buildOption=buildType.BuildType.DO_NOT_BUILD,
                       indentDepth=12,
                       stepsCounter=stepsCounter)
    # each override corrupts exactly the member(s) whose validation must fire
    invalidOverrides = [dict(dbBranch="master"),
                        dict(buildOption="build"),
                        dict(htmlOutputFile="index.html"),
                        dict(indentDepth="zero"),
                        dict(indentDepth=2, stepsCounter=1)]
    for override in invalidOverrides:
      with self.assertRaises(Exception):
        buildSettings.BuildSettings(**{**validKwargs, **override})
|
CyberDani/personal-roadmap | webPage/generator/modules/git.py | from modules import cmd
from modules import stringUtil
def getRepoRootDirectory():
  """Returns the git repository root path with trailing newlines stripped."""
  rawOutput = cmd.getOutputFromCommand("git rev-parse --show-toplevel")
  return stringUtil.rTrimNewLines(rawOutput)
def getCurrentBranch():
  """Returns the currently checked-out branch name by parsing .git/HEAD.

  Raises when HEAD contains no "ref:" line (e.g. a detached HEAD).
  Fixes: the HEAD file handle was leaked (never closed) — now read inside a
  context manager; the slice comparison is replaced by str.startswith.
  """
  headFilePath = getRepoRootDirectory() + "/.git/HEAD"
  with open(headFilePath, "r") as f:
    content = f.read().splitlines()
  for line in content:
    if line.startswith("ref:"):
      # "ref: refs/heads/<branch>" -> take everything after "refs/heads/"
      return line.partition("refs/heads/")[2]
  raise Exception("No branch name had been found!")
CyberDani/personal-roadmap | webPage/generator/unitTests/uTest_test.py | <filename>webPage/generator/unitTests/uTest_test.py
import os
import sys
import unittest
sys.path.append('..')
from defTypes import appDecisionType
from modules import checks
from modules import uTest
class UnitTestTests(unittest.TestCase):
  """Tests for modules.uTest: discovery/run plumbing and result evaluation.

  Fix: every test previously opened os.devnull without ever closing it
  (leaked handle, ResourceWarning) — the handles are now context-managed.
  """

  def test_collectAndRunUnitTestsByFilePattern_nonSense(self):
    with open(os.devnull, "w") as void:
      invalidArgLists = [(None, None, void),
                         ("", "", void),
                         ('./unitTests/', "", void),
                         ("", "*.py", void),
                         ('./unitTests/', True, void),
                         ('./unitTests/', "*.patternWithNoFounding", void),
                         ('./nonExistingFolder/', "*.py", void),
                         (False, "*.py", void),
                         (False, True, void)]
      for args in invalidArgLists:
        with self.assertRaises(Exception):
          uTest.collectAndRunUnitTestsByFilePattern(*args)

  def test_collectAndRunUnitTestsByFilePattern_examples(self):
    with open(os.devnull, "w") as void:
      result = uTest.collectAndRunUnitTestsByFilePattern('./unitTests4unitTests/', "pass_*.py", void)
      self.assertEqual(result.testsRun, 6)
      self.assertTrue(result.wasSuccessful())
      result = uTest.collectAndRunUnitTestsByFilePattern('./unitTests4unitTests/', "*_group1.py", void)
      self.assertEqual(result.testsRun, 7)
      self.assertFalse(result.wasSuccessful())

  def test_runAndEvaluateUnitTests_nonSense(self):
    with open(os.devnull, "w") as void:
      invalidArgLists = [(None, None),
                         ("", ""),
                         ('./unitTests/', ""),
                         ("", "*.py"),
                         ('./unitTests/', True),
                         ('./unitTests/', "*.patternWithNoFounding", void),
                         ('./nonExistingFolder/', "*.py", void),
                         (False, "*.py"),
                         (False, True)]
      for args in invalidArgLists:
        with self.assertRaises(Exception):
          uTest.runAndEvaluateUnitTests(*args)

  def test_runAndEvaluateUnitTests_examples(self):
    with open(os.devnull, "w") as void:
      expectations = [("pass_*.py", appDecisionType.AppDecisionType.CONTINUE_RUNNING),
                      ("*_group2.py", appDecisionType.AppDecisionType.CONTINUE_RUNNING),
                      ("*_group1.py", appDecisionType.AppDecisionType.STOP_APP),
                      ("*_x_*.py", appDecisionType.AppDecisionType.STOP_APP)]
      for filePattern, expectedDecision in expectations:
        result, lines = uTest.runAndEvaluateUnitTests('./unitTests4unitTests/', filePattern, void)
        checks.checkIfPureListOfStrings(lines)
        self.assertEqual(result, expectedDecision)
CyberDani/personal-roadmap | webPage/generator/unitTests/htmlBody_test.py | <gh_stars>0
import sys
import unittest
sys.path.append('..')
from modules import htmlBuilder
from modules import filerw
from modules import htmlBody
class HtmlBodyTests(unittest.TestCase):
def test_constructor_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBody.HtmlBody(file, -2)
with self.assertRaises(Exception):
htmlBody.HtmlBody(file, None)
with self.assertRaises(Exception):
htmlBody.HtmlBody(file, "")
with self.assertRaises(Exception):
htmlBody.HtmlBody(file, True)
with self.assertRaises(Exception):
htmlBody.HtmlBody("./unitTests/temp/test.txt", 2)
with self.assertRaises(Exception):
htmlBody.HtmlBody(None, 2)
with self.assertRaises(Exception):
htmlBody.HtmlBody(False, 2)
def test_includeFileThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2"])
body = htmlBody.HtmlBody(file, 2)
with self.assertRaises(Exception):
body.includeFileThenAppendNewLine(file)
with self.assertRaises(Exception):
body.includeFileThenAppendNewLine(["line3", "line4"])
with self.assertRaises(Exception):
body.includeFileThenAppendNewLine(None)
with self.assertRaises(Exception):
body.includeFileThenAppendNewLine(True)
with self.assertRaises(Exception):
body.includeFileThenAppendNewLine("heyho")
  def test_includeFileThenAppendNewLine_includeEmptyFile(self):
    # Including an empty file should add only the single trailing empty line.
    file2 = open("./unitTests/temp/test2.txt", "w")
    file2.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2"])
    body = htmlBody.HtmlBody(file, 2)
    body.includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3)
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    # the appended newline shows up as one empty last line
    self.assertEqual(lines[2], "")
  def test_includeFileThenAppendNewLine_includeNonEmptyFile(self):
    # Included lines are indented by the body's depth (3 -> three tabs),
    # followed by the appended empty line.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file2, ["include 1", "include 2"])
    file2.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2"])
    body = htmlBody.HtmlBody(file, 3)
    body.includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 5)
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "\t\t\tinclude 1")
    self.assertEqual(lines[3], "\t\t\tinclude 2")
    self.assertEqual(lines[4], "")
  def test_includeFileThenAppendNewLine_chaining(self):
    # includeFileThenAppendNewLine returns self, so two includes can be chained;
    # each include ends with its own empty line.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file2, ["include 1", "include 2"])
    file2.close()
    file3 = open("./unitTests/temp/test3.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file3, ["include 3", "include 4"])
    file3.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2"])
    body = htmlBody.HtmlBody(file, 3)
    body.includeFileThenAppendNewLine("./unitTests/temp/test2.txt") \
        .includeFileThenAppendNewLine("./unitTests/temp/test3.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 8)
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "\t\t\tinclude 1")
    self.assertEqual(lines[3], "\t\t\tinclude 2")
    self.assertEqual(lines[4], "")
    self.assertEqual(lines[5], "\t\t\tinclude 3")
    self.assertEqual(lines[6], "\t\t\tinclude 4")
    self.assertEqual(lines[7], "")
def test_openHtmlTagThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 3)
body.openHtmlTagThenAppendNewLine("div")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("<div")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("<div>")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("/div")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("div\nspan")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("ul selected")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("", "focused")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine(12)
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine(2, "option2")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine(None)
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine(None, "selected")
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("abc", None)
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("abc", 12)
with self.assertRaises(Exception):
body.openHtmlTagThenAppendNewLine("abc", False)
  def test_openHtmlTagThenAppendNewLine_addOneTag(self):
    # An opened tag is written indented by the body's depth (3 -> three tabs)
    # with its attribute string inside the tag.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 3)
    body.openHtmlTagThenAppendNewLine("div", "class='magicalDiv'")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t\t\t<div class='magicalDiv'>")
def test_openHtmlTagThenAppendNewLine_addTwoTag(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
body = htmlBody.HtmlBody(file, 1)
body.openHtmlTagThenAppendNewLine("div", "class='magicalDiv'") \
.openHtmlTagThenAppendNewLine("table")
file.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], "first line")
self.assertEqual(lines[1], "second line")
self.assertEqual(lines[2], "\t<div class='magicalDiv'>")
self.assertEqual(lines[3], "\t\t<table>")
def test_openHtmlTagThenAppendNewLine_addThreeTag(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
body = htmlBody.HtmlBody(file, 2)
body.openHtmlTagThenAppendNewLine("div", "class='magicalDiv'") \
.openHtmlTagThenAppendNewLine("table") \
.openHtmlTagThenAppendNewLine("tr")
file.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], "first line")
self.assertEqual(lines[1], "second line")
self.assertEqual(lines[2], "\t\t<div class='magicalDiv'>")
self.assertEqual(lines[3], "\t\t\t<table>")
self.assertEqual(lines[4], "\t\t\t\t<tr>")
  def test_openHtmlTagThenAppendNewLine_indentationWith1HtmlTag(self):
    # Content included inside one open tag is indented one tab deeper than it.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file2, ["1. include", "2. include"])
    file2.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 1)
    body.openHtmlTagThenAppendNewLine("div", "class='magicalDiv'") \
        .includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 6)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t<div class='magicalDiv'>")
    self.assertEqual(lines[3], "\t\t1. include")
    self.assertEqual(lines[4], "\t\t2. include")
    self.assertEqual(lines[5], "")

  def test_openHtmlTagThenAppendNewLine_indentationWith2HtmlTag(self):
    # Nested open tags shift included file content deeper at each level.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFile(file2, ["1. include", "2. include"])
    file2.close()
    file3 = open("./unitTests/temp/test3.txt", "w")
    filerw.writeLinesToFile(file3, ["next", "next -> next", "next -> next -> next"])
    file3.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 1)
    body.openHtmlTagThenAppendNewLine("div", "class='magicalDiv'") \
        .includeFileThenAppendNewLine("./unitTests/temp/test2.txt") \
        .openHtmlTagThenAppendNewLine("div", "class='nestedDiv'") \
        .includeFileThenAppendNewLine("./unitTests/temp/test3.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 9)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t<div class='magicalDiv'>")
    self.assertEqual(lines[3], "\t\t1. include")
    self.assertEqual(lines[4], "\t\t2. include")
    self.assertEqual(lines[5], "\t\t<div class='nestedDiv'>")
    self.assertEqual(lines[6], "\t\t\tnext")
    self.assertEqual(lines[7], "\t\t\tnext -> next")
    self.assertEqual(lines[8], "\t\t\tnext -> next -> next")
def test_closeLastOpenedHtmlTag_closeNothing(self):
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 1)
with self.assertRaises(Exception):
body.closeLastOpenedHtmlTag()
file.close()
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 1)
body.openHtmlTagThenAppendNewLine("table").closeLastOpenedHtmlTag()
with self.assertRaises(Exception):
body.closeLastOpenedHtmlTag()
file.close()
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 1)
body.openHtmlTagThenAppendNewLine("table").openHtmlTagThenAppendNewLine("tr") \
.closeLastOpenedHtmlTag().closeLastOpenedHtmlTag()
with self.assertRaises(Exception):
body.closeLastOpenedHtmlTag()
file.close()
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 1)
body.openHtmlTagThenAppendNewLine("table").openHtmlTagThenAppendNewLine("tr").openHtmlTagThenAppendNewLine("td") \
.closeLastOpenedHtmlTag().closeLastOpenedHtmlTag().closeLastOpenedHtmlTag()
with self.assertRaises(Exception):
body.closeLastOpenedHtmlTag()
  def test_closeLastOpenedHtmlTag_oneTag(self):
    # A close writes the matching end tag at the same indentation as the open.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 1)
    body.openHtmlTagThenAppendNewLine("a", "href='link.com'").closeLastOpenedHtmlTag()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 4)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t<a href='link.com'>")
    self.assertEqual(lines[3], "\t</a>")

  def test_closeLastOpenedHtmlTag_twoTag(self):
    # Closes unwind in LIFO order: inner tag is closed before the outer one.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 1)
    body.openHtmlTagThenAppendNewLine("h2").openHtmlTagThenAppendNewLine("a", "href='link.com'") \
        .closeLastOpenedHtmlTag().closeLastOpenedHtmlTag()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 6)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t<h2>")
    self.assertEqual(lines[3], "\t\t<a href='link.com'>")
    self.assertEqual(lines[4], "\t\t</a>")
    self.assertEqual(lines[5], "\t</h2>")

  def test_closeLastOpenedHtmlTag_threeTag(self):
    # Three nested opens close back out symmetrically at matching depths.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 3)
    body.openHtmlTagThenAppendNewLine("div").openHtmlTagThenAppendNewLine("div", "class='myDiv'") \
        .openHtmlTagThenAppendNewLine("span") \
        .closeLastOpenedHtmlTag().closeLastOpenedHtmlTag().closeLastOpenedHtmlTag()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 8)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t\t\t<div>")
    self.assertEqual(lines[3], "\t\t\t\t<div class='myDiv'>")
    self.assertEqual(lines[4], "\t\t\t\t\t<span>")
    self.assertEqual(lines[5], "\t\t\t\t\t</span>")
    self.assertEqual(lines[6], "\t\t\t\t</div>")
    self.assertEqual(lines[7], "\t\t\t</div>")
  def test_closeLastOpenedHtmlTag_indentation(self):
    # Included file content tracks the current nesting depth, and each close
    # pops the indentation back one level.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFile(file2, ["1. include", "2. include"])
    file2.close()
    file3 = open("./unitTests/temp/test3.txt", "w")
    filerw.writeLinesToFile(file3, ["next", "next -> next", "next -> next -> next"])
    file3.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["first line", "second line"])
    body = htmlBody.HtmlBody(file, 2)
    body.openHtmlTagThenAppendNewLine("table").includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    body.openHtmlTagThenAppendNewLine("tr").includeFileThenAppendNewLine("./unitTests/temp/test3.txt")
    body.closeLastOpenedHtmlTag().includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    body.closeLastOpenedHtmlTag().includeFileThenAppendNewLine("./unitTests/temp/test2.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 15)
    self.assertEqual(lines[0], "first line")
    self.assertEqual(lines[1], "second line")
    self.assertEqual(lines[2], "\t\t<table>")
    self.assertEqual(lines[3], "\t\t\t1. include")
    self.assertEqual(lines[4], "\t\t\t2. include")
    self.assertEqual(lines[5], "\t\t\t<tr>")
    self.assertEqual(lines[6], "\t\t\t\tnext")
    self.assertEqual(lines[7], "\t\t\t\tnext -> next")
    self.assertEqual(lines[8], "\t\t\t\tnext -> next -> next")
    self.assertEqual(lines[9], "\t\t\t</tr>")
    self.assertEqual(lines[10], "\t\t\t1. include")
    self.assertEqual(lines[11], "\t\t\t2. include")
    self.assertEqual(lines[12], "\t\t</table>")
    self.assertEqual(lines[13], "\t\t1. include")
    self.assertEqual(lines[14], "\t\t2. include")
def test_addHtmlNewLineThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 2)
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine(None)
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine(True)
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine("")
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine("Zero")
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine(0)
with self.assertRaises(Exception):
body.addHtmlNewLineThenAppendNewLine(-1)
  def test_addHtmlNewLineThenAppendNewLine_1br(self):
    # Output must match htmlBuilder.getHtmlNewLines(indentDepth, brCount).
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["simple line"])
    body = htmlBody.HtmlBody(file, 1)
    body.addHtmlNewLineThenAppendNewLine(1)
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 2)
    self.assertEqual(lines[0], "simple line")
    self.assertEqual(lines[1], htmlBuilder.getHtmlNewLines(1, 1))

  def test_addHtmlNewLineThenAppendNewLine_2br(self):
    # Same check with two <br>s at indent depth 3.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["simple line"])
    body = htmlBody.HtmlBody(file, 3)
    body.addHtmlNewLineThenAppendNewLine(2)
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 2)
    self.assertEqual(lines[0], "simple line")
    self.assertEqual(lines[1], htmlBuilder.getHtmlNewLines(3, 2))

  def test_addHtmlNewLineThenAppendNewLine_5br(self):
    # Same check with five <br>s at indent depth 2.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["simple line"])
    body = htmlBody.HtmlBody(file, 2)
    body.addHtmlNewLineThenAppendNewLine(5)
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 2)
    self.assertEqual(lines[0], "simple line")
    self.assertEqual(lines[1], htmlBuilder.getHtmlNewLines(2, 5))

  def test_addHtmlNewLineThenAppendNewLine_2brAnd5br(self):
    # Chained calls append one line per call, each independently formatted.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["simple line"])
    body = htmlBody.HtmlBody(file, 4)
    body.addHtmlNewLineThenAppendNewLine(2).addHtmlNewLineThenAppendNewLine(5)
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3)
    self.assertEqual(lines[0], "simple line")
    self.assertEqual(lines[1], htmlBuilder.getHtmlNewLines(4, 2))
    self.assertEqual(lines[2], htmlBuilder.getHtmlNewLines(4, 5))
def test_addJsScriptSrcThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 4)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine(False, None, None, None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("", None, None, None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("hello", None, None, None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "sha215-23", None, None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", None, "anonymous", None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", None, None, "no-refferer")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", None, "anonymous", "no-refferer")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "sha512-23", None, "no-refferer")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "sha512-23", "anonymous", None)
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "a", "x", "z")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "abc", "anonymous", "no-refferer")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "sha512-asdasdc-xcx", "abc", "no-refferer")
with self.assertRaises(Exception):
body.addJsScriptSrcThenAppendNewLine("www.mysite.com/res.js", "sha512-asdasdc-xcx", "anonymous", "ab")
  def test_addJsScriptSrcThenAppendNewLine_justUrl(self):
    # Written lines must equal htmlBuilder.getJsScriptSrc for the same args.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["- 1 -", "- 2 -"])
    body = htmlBody.HtmlBody(file, 1)
    body.addJsScriptSrcThenAppendNewLine("myAwesomeSite.com/randomScript.js", None, None, None)
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    jsLines = htmlBuilder.getJsScriptSrc(1, "myAwesomeSite.com/randomScript.js", None, None, None)
    self.assertEqual(len(lines), 2 + len(jsLines))
    self.assertEqual(lines[0], "- 1 -")
    self.assertEqual(lines[1], "- 2 -")
    for i in range(0, len(jsLines)):
      self.assertEqual(lines[2 + i], jsLines[i])

  def test_addJsScriptSrcThenAppendNewLine_urlIntegrityCrossoriginReferrerpolicy(self):
    # Same comparison with all four script attributes supplied.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["- 1 -", "- 2 -"])
    body = htmlBody.HtmlBody(file, 6)
    body.addJsScriptSrcThenAppendNewLine("https://lookatthis.com/itsascript.js",
                                         "sha512-wgn28cn12ed02d==", "geekyBoy", "no-refferrer")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    jsLines = htmlBuilder.getJsScriptSrc(6, "https://lookatthis.com/itsascript.js",
                                         "sha512-wgn28cn12ed02d==", "geekyBoy", "no-refferrer")
    self.assertEqual(len(lines), 2 + len(jsLines))
    self.assertEqual(lines[0], "- 1 -")
    self.assertEqual(lines[1], "- 2 -")
    for i in range(0, len(jsLines)):
      self.assertEqual(lines[2 + i], jsLines[i])
def test_includeFileAsInlineJs_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
body = htmlBody.HtmlBody(file, 1)
with self.assertRaises(Exception):
body.includeFileAsInlineJs("nonExistingFile.js")
with self.assertRaises(Exception):
body.includeFileAsInlineJs(None)
with self.assertRaises(Exception):
body.includeFileAsInlineJs(False)
with self.assertRaises(Exception):
body.includeFileAsInlineJs(file)
with self.assertRaises(Exception):
body.includeFileAsInlineJs(122)
  def test_includeFileAsInlineJs_example(self):
    # Included JS is wrapped in <script> tags and indented one tab deeper.
    file2 = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFile(file2, ["function getTwo() {", "\treturn 2;", "}"])
    file2.close()
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["\tsome line", "\tsome another line here"])
    body = htmlBody.HtmlBody(file, 1)
    body.includeFileAsInlineJs("./unitTests/temp/test2.txt")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 7)
    self.assertEqual(lines[0], "\tsome line")
    self.assertEqual(lines[1], "\tsome another line here")
    self.assertEqual(lines[2], "\t<script>")
    self.assertEqual(lines[3], "\t\tfunction getTwo() {")
    self.assertEqual(lines[4], "\t\t\treturn 2;")
    self.assertEqual(lines[5], "\t\t}")
    self.assertEqual(lines[6], "\t</script>")
|
CyberDani/personal-roadmap | webPage/generator/unitTests/db_test.py | <gh_stars>0
import unittest
import sys
sys.path.append('..')
from defTypes import dbBranchType
from modules import db
from modules import git
class DbTests(unittest.TestCase):

  def test_getDbBranchByGitBranch_nonSense(self):
    """Non-string or list-valued branch arguments must be rejected."""
    for invalidBranch in ["", None, 12, True, [], ['master']]:
      with self.assertRaises(Exception):
        db.getDbBranchByGitBranch(invalidBranch)

  def test_getDbBranchByGitBranch_examples(self):
    """'master' maps to the MASTER db branch; every other branch to DEVEL."""
    self.assertEqual(db.getDbBranchByGitBranch("master"), dbBranchType.DbBranchType.MASTER)
    self.assertEqual(db.getDbBranchByGitBranch("devel"), dbBranchType.DbBranchType.DEVEL)
    self.assertEqual(db.getDbBranchByGitBranch("feature_xy"), dbBranchType.DbBranchType.DEVEL)

  def test_getCurrentDbBranch_examples(self):
    """getCurrentDbBranch agrees with mapping the current git branch manually."""
    currentGitBranch = git.getCurrentBranch()
    self.assertEqual(db.getDbBranchByGitBranch(currentGitBranch), db.getCurrentDbBranch())
|
CyberDani/personal-roadmap | webPage/generator/generator.py | <gh_stars>0
import os
import sys
from defTypes import appDecisionType
from defTypes import buildSettings
from defTypes import buildType
from modules import argumentParser
from modules import counter
from modules import htmlBody
from modules import htmlBuilder
from modules import htmlHead
from modules import uTest
# this is the main function being run
# this is the main function being run
def backupAndGenerateNewHtmlOutputFileIfAllUnitTestsPassDrivenByArguments():
  """Parse CLI arguments, then run the pipeline steps they request:
  unit tests -> backup -> HTML generation. Each handler may sys.exit()."""
  args = argumentParser.getCommandLineArgs()
  invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
  handleInvalidUsageIfRequired(invalidUsage)
  # the counter numbers the progress messages printed by each step
  stepsCounter = counter.SimpleCounter(1)
  handleUnitTestsIfRequired(runUnitTests, stepsCounter)
  handleBackupIfRequired(backup, stepsCounter)
  handleBuildingIfRequired(buildOption, stepsCounter, dbBranch)
def handleInvalidUsageIfRequired(invalidUsage):
  """Print usage help and terminate the process when the command was invalid."""
  if invalidUsage:
    print(" [!] Invalid command")
    print(*argumentParser.getScriptUsageLines(), sep="\n")
    sys.exit()
def handleUnitTestsIfRequired(runUnitTests, stepsCounter):
  """Run the unit-test suite when requested; stop the app if any test fails."""
  if runUnitTests:
    print(stepsCounter.getNextMessage('Evaluate unit tests . . .\n'))
    decision, reportLines = uTest.runAndEvaluateUnitTests('./unitTests/', '*_test.py')
    print(*reportLines, sep="\n")
    if decision == appDecisionType.AppDecisionType.STOP_APP:
      sys.exit()
  else:
    print(stepsCounter.getNextMessage('Skip unit tests'))
def handleBackupIfRequired(backup, stepsCounter):
  """Move the current output files to the backup folder when requested."""
  if backup:
    print(stepsCounter.getNextMessage('Backup current files . . .'))
    backupFiles()
  else:
    print(stepsCounter.getNextMessage('Skip making backups'))
def handleBuildingIfRequired(buildOption, stepsCounter, dbBranch):
  """Generate ../../index.html unless building was disabled via the CLI."""
  if buildOption == buildType.BuildType.DO_NOT_BUILD:
    print(stepsCounter.getNextMessage('Skip building'))
    return
  htmlOutputFilePath = "../../index.html"
  # fix: 'with' guarantees the output handle is flushed and closed even if
  # generation raises; previously the file was opened and never closed.
  with open(htmlOutputFilePath, "w") as htmlFile:
    settings = buildSettings.BuildSettings(htmlOutputFile=htmlFile,
                                           buildOption=buildOption,
                                           dbBranch=dbBranch,
                                           stepsCounter=stepsCounter,
                                           indentDepth=2)
    generateNewHtmlOutputFile(settings)
def backupFiles():
  # Move the currently deployed index.html into ./backup before regenerating
  # (os.replace overwrites any previous backup atomically where the OS allows).
  os.replace("../../index.html", "./backup/index.html")
def generateNewHtmlOutputFile(settings):
  """Build index.html by delegating <head> and <body> writers to htmlBuilder."""
  print(settings.stepsCounter.getNextMessage('Generate HTML files . . .'))
  htmlBuilder.buildIndexHtmlFile(writeHtmlHeadContent, writeHtmlBodyContent, settings)
# <head>
# <head>
def writeHtmlHeadContent(settings):
  """Write the <head> section: page metadata first, then third-party libraries."""
  head = htmlHead.HtmlHead(settings.htmlOutputFile, settings.indentDepth)
  # page metadata
  head.setTitle("Programming puzzle-pieces")
  head.setFavicon("./webPage/images/favicon.png")
  head.setMetaScreenOptimizedForMobile()
  head.includeFileAsInlineCSS("./htmlIncludes/inlineCssStyle.css")
  # third-party libraries
  head.addFontAwesome_v611()
  head.addJquery_v360()
  head.addGoogleIcons()
  head.addMaterialize_v110_alpha()
  head.addGoogleFont("?family=Arima+Madurai:wght@500&display=swap")
  head.addJQueryLoadingOverlay_v217()
# <body>
# <body>
def writeHtmlBodyContent(settings):
  """Write the <body> section: navigation, main page content, footer, scripts."""
  body = htmlBody.HtmlBody(settings.htmlOutputFile, settings.indentDepth)
  # navigation and header quote
  body.includeFileThenAppendNewLine("./htmlIncludes/topNav.txt")
  body.includeFileThenAppendNewLine("./htmlIncludes/sideNav.txt")
  body.includeFileThenAppendNewLine("./htmlIncludes/topQuote.txt")
  body.addHtmlNewLineThenAppendNewLine(1)
  # main content wrapped in div#webContent
  body.openHtmlTagThenAppendNewLine("div", "id=\"webContent\"")
  body.includeFileThenAppendNewLine("../pages/mainPage/svgCurve1.txt")
  body.includeFileThenAppendNewLine("../pages/mainPage/whatThisProjectOffers.txt")
  body.includeFileThenAppendNewLine("../pages/mainPage/svgCurve2.txt")
  body.includeFileThenAppendNewLine("../pages/mainPage/personalRecommendation.txt")
  body.includeFileThenAppendNewLine("../pages/mainPage/svgCurve3.txt")
  body.includeFileThenAppendNewLine("../pages/mainPage/textBelowCurves.txt")
  body.closeLastOpenedHtmlTag()  # div#webContent
  # footer and scripts
  body.includeFileThenAppendNewLine("./htmlIncludes/footer.txt")
  body.addJsScriptSrcThenAppendNewLine("./webPage/scripts/githubApiScripts.js")
  body.addJsScriptSrcThenAppendNewLine("./webPage/scripts/navigationScripts.js")
  body.includeFileAsInlineJs("./htmlIncludes/inlineJs.js")
# fix: guard the entry point so importing this module (e.g. from tests or
# tooling) no longer triggers the whole generation pipeline as a side effect.
if __name__ == "__main__":
  backupAndGenerateNewHtmlOutputFileIfAllUnitTestsPassDrivenByArguments()
|
CyberDani/personal-roadmap | webPage/generator/modules/stringUtil.py | <reponame>CyberDani/personal-roadmap<gh_stars>0
from modules import checks
# <endsWithStr> is searched only after the end of <startsWithStr> (no overlap).
# Returns an empty string if either marker is not found.
def getStringStartsWithEndsWithNoOverlap(src, startsWithStr, endsWithStr):
  """Return the shortest substring of src that begins with startsWithStr and,
  strictly after it, ends with endsWithStr; "" when either marker is missing."""
  checks.checkIfString(src, 2, 500)
  checks.checkIfString(startsWithStr, 1, 500)
  checks.checkIfString(endsWithStr, 1, 500)
  startIdx = src.find(startsWithStr)
  if startIdx == -1:
    return ""
  # search the end marker only past the start marker, so the two cannot overlap
  searchFrom = startIdx + len(startsWithStr)
  endIdx = src.find(endsWithStr, searchFrom)
  if endIdx == -1:
    return ""
  return src[startIdx:endIdx + len(endsWithStr)]
def rTrimNewLines(string):
  """Strip every trailing newline and carriage-return character from string."""
  checks.checkIfString(string, 0, 1000000)
  # str.rstrip with a character set removes all trailing '\n'/'\r' in one
  # O(n) pass, replacing the previous one-character-per-iteration loop
  # (which re-sliced the string each time, i.e. quadratic on long tails).
  return string.rstrip("\r\n")
CyberDani/personal-roadmap | webPage/generator/unitTests/htmlHead_test.py | import sys
import unittest
sys.path.append('..')
from modules import htmlBuilder
from modules import filerw
from modules import htmlHead
from modules import webLibs
class HtmlHeadTests(unittest.TestCase):
  def test_constructor_nonSense(self):
    # HtmlHead requires a file object and a positive int indent depth.
    # NOTE(review): the handle opened here is never closed — confirm whether
    # the suite relies on interpreter shutdown to release temp files.
    file = open("./unitTests/temp/test.txt", "w")
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(file, -2)
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(file, None)
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(file, "")
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(file, True)
    with self.assertRaises(Exception):
      htmlHead.HtmlHead("./unitTests/temp/test.txt", 2)
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(None, 2)
    with self.assertRaises(Exception):
      htmlHead.HtmlHead(False, 2)
  def test_setTitle_nonSense(self):
    # Empty or non-string titles must raise.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    with self.assertRaises(Exception):
      head.setTitle("")
    with self.assertRaises(Exception):
      head.setTitle(None)
    with self.assertRaises(Exception):
      head.setTitle(23)

  def test_setTitle_multipleTimes(self):
    # The title can only be set once per HtmlHead instance.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.setTitle("Title 1")
    with self.assertRaises(Exception):
      head.setTitle("Title 2")
    with self.assertRaises(Exception):
      head.setTitle("Title 3")

  def test_setTitle_example(self):
    # The written line must match htmlBuilder.getHtmlTitle for the same args.
    for title in ["title", "my page", "Look At This 23!#"]:
      file = open("./unitTests/temp/test.txt", "w")
      filerw.writeLinesToFileThenAppendNewLine(file, ["random string", "another random string"])
      head = htmlHead.HtmlHead(file, 2)
      head.setTitle(title)
      file.close()
      line = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
      self.assertEqual(len(line), 3)
      self.assertEqual(line[0], "random string")
      self.assertEqual(line[1], "another random string")
      self.assertEqual(line[2], htmlBuilder.getHtmlTitle(title, 2))
  def test_setFavicon_nonSense(self):
    # Empty or non-string favicon paths must raise.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    with self.assertRaises(Exception):
      head.setFavicon("")
    with self.assertRaises(Exception):
      head.setFavicon(None)
    with self.assertRaises(Exception):
      head.setFavicon(23)

  def test_setFavicon_multipleTimes(self):
    # The favicon can only be set once, even with the same path.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.setFavicon("icon1.png")
    with self.assertRaises(Exception):
      head.setFavicon("icon1.png")
    with self.assertRaises(Exception):
      head.setFavicon("icon2.png")

  def test_setFavicon_example(self):
    # The written line must match htmlBuilder.getHtmlFavicon for the same args.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["random string", "another random string"])
    head = htmlHead.HtmlHead(file, 2)
    head.setFavicon("./images/logo.ico")
    file.close()
    line = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(line), 3)
    self.assertEqual(line[0], "random string")
    self.assertEqual(line[1], "another random string")
    self.assertEqual(line[2], htmlBuilder.getHtmlFavicon("./images/logo.ico", 2))
  def test_setMetaScreenOptimizedForMobile_multipleTimes(self):
    # The mobile viewport meta tag can only be added once.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.setMetaScreenOptimizedForMobile()
    with self.assertRaises(Exception):
      head.setMetaScreenOptimizedForMobile()
    with self.assertRaises(Exception):
      head.setMetaScreenOptimizedForMobile()

  def test_setMetaScreenOptimizedForMobile_example(self):
    # The written line must match htmlBuilder.getMetaScreenOptimizedForMobile.
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["random string", "another random string"])
    head = htmlHead.HtmlHead(file, 2)
    head.setMetaScreenOptimizedForMobile()
    file.close()
    line = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(line), 3)
    self.assertEqual(line[0], "random string")
    self.assertEqual(line[1], "another random string")
    self.assertEqual(line[2], htmlBuilder.getMetaScreenOptimizedForMobile(2))
  def test_includeFileAsInlineCSS_nonSense(self):
    # Missing files and non-string paths must raise.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    with self.assertRaises(Exception):
      head.includeFileAsInlineCSS(file)
    with self.assertRaises(Exception):
      head.includeFileAsInlineCSS("")
    with self.assertRaises(Exception):
      head.includeFileAsInlineCSS(None)
    with self.assertRaises(Exception):
      head.includeFileAsInlineCSS(23)

  def test_includeFileAsInlineCSS_example(self):
    # Included CSS is wrapped in <style> tags, content indented one tab deeper.
    file = open("./unitTests/temp/test2.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["random string", "another random string"])
    file.close()
    fileDest = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(fileDest, ["<html>", "\t<head>"])
    head = htmlHead.HtmlHead(fileDest, 3)
    head.includeFileAsInlineCSS("./unitTests/temp/test2.txt")
    fileDest.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 6)
    self.assertEqual(lines[0], "<html>")
    self.assertEqual(lines[1], "\t<head>")
    self.assertEqual(lines[2], "\t\t\t<style>")
    self.assertEqual(lines[3], "\t\t\t\trandom string")
    self.assertEqual(lines[4], "\t\t\t\tanother random string")
    self.assertEqual(lines[5], "\t\t\t</style>")
  def test_addFontAwesome_v611_multipleTimes(self):
    # The Font Awesome library can only be added once.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addFontAwesome_v611()
    with self.assertRaises(Exception):
      head.addFontAwesome_v611()
    with self.assertRaises(Exception):
      head.addFontAwesome_v611()

  def test_addFontAwesome_v611_example(self):
    # head.addFontAwesome_v611 output must equal webLibs.addFontAwesome_v611's.
    # get lines to compare with
    file = open("./unitTests/temp/test2.txt", "w")
    webLibs.addFontAwesome_v611(file, 3)
    file.close()
    faLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
    # add lib and compare
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
    head = htmlHead.HtmlHead(file, 3)
    head.addFontAwesome_v611()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3 + len(faLines))
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "line 3")
    i = 0
    for faLine in faLines:
      self.assertEqual(faLine, lines[3 + i])
      i += 1
  def test_addJquery_v360_multipleTimes(self):
    # The jQuery library can only be added once.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addJquery_v360()
    with self.assertRaises(Exception):
      head.addJquery_v360()
    with self.assertRaises(Exception):
      head.addJquery_v360()
def test_addFontAwesome_v611_example(self):
# get lines to compare with
file = open("./unitTests/temp/test2.txt", "w")
webLibs.addJquery_v360(file, 3)
file.close()
libLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
# add lib and compare
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
head = htmlHead.HtmlHead(file, 3)
head.addJquery_v360()
file.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 3 + len(libLines))
self.assertEqual(lines[0], "line 1")
self.assertEqual(lines[1], "line 2")
self.assertEqual(lines[2], "line 3")
i = 0
for libLine in libLines:
self.assertEqual(libLine, lines[3 + i])
i += 1
  def test_addGoogleIcons_multipleTimes(self):
    # The Google Icons library can only be added once.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addGoogleIcons()
    with self.assertRaises(Exception):
      head.addGoogleIcons()
    with self.assertRaises(Exception):
      head.addGoogleIcons()

  def test_addGoogleIcons_example(self):
    # head.addGoogleIcons output must equal webLibs.addGoogleIcons's.
    # get lines to compare with
    file = open("./unitTests/temp/test2.txt", "w")
    webLibs.addGoogleIcons(file, 4)
    file.close()
    libLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
    # add lib and compare
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
    head = htmlHead.HtmlHead(file, 4)
    head.addGoogleIcons()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3 + len(libLines))
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "line 3")
    i = 0
    for libLine in libLines:
      self.assertEqual(libLine, lines[3 + i])
      i += 1
  def test_addMaterialize_v110_alpha_multipleTimes(self):
    # The Materialize library can only be added once.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addMaterialize_v110_alpha()
    with self.assertRaises(Exception):
      head.addMaterialize_v110_alpha()
    with self.assertRaises(Exception):
      head.addMaterialize_v110_alpha()

  def test_addMaterialize_v110_alpha_example(self):
    # head.addMaterialize_v110_alpha output must equal webLibs' version.
    # get lines to compare with
    file = open("./unitTests/temp/test2.txt", "w")
    webLibs.addMaterialize_v110_alpha(file, 4)
    file.close()
    libLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
    # add lib and compare
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
    head = htmlHead.HtmlHead(file, 4)
    head.addMaterialize_v110_alpha()
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3 + len(libLines))
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "line 3")
    i = 0
    for libLine in libLines:
      self.assertEqual(libLine, lines[3 + i])
      i += 1
  def test_addGoogleFont_nonSense(self):
    # Empty or non-string font query strings must raise.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    with self.assertRaises(Exception):
      head.addGoogleFont("")
    with self.assertRaises(Exception):
      head.addGoogleFont(True)
    with self.assertRaises(Exception):
      head.addGoogleFont(None)
    with self.assertRaises(Exception):
      head.addGoogleFont(23)

  def test_addGoogleFont_multipleTimes(self):
    # A Google font can only be added once, regardless of the query string.
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addGoogleFont("gq4fg43qgq4wfq")
    with self.assertRaises(Exception):
      head.addGoogleFont("asd3as2das?asd")
    with self.assertRaises(Exception):
      head.addGoogleFont("g45g5434gqf")

  def test_addGoogleFont_example(self):
    # head.addGoogleFont output must equal webLibs.addGoogleFont's.
    # get lines to compare with
    file = open("./unitTests/temp/test2.txt", "w")
    webLibs.addGoogleFont(file, 5, "?fontName=TimesNewRoman&type=bold")
    file.close()
    libLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
    # add lib and compare
    file = open("./unitTests/temp/test.txt", "w")
    filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
    head = htmlHead.HtmlHead(file, 5)
    head.addGoogleFont("?fontName=TimesNewRoman&type=bold")
    file.close()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3 + len(libLines))
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "line 3")
    i = 0
    for libLine in libLines:
      self.assertEqual(libLine, lines[3 + i])
      i += 1
def test_addJQueryLoadingOverlay_v217_multipleTimes(self):
    """A second or third addJQueryLoadingOverlay_v217 call must raise."""
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.addJQueryLoadingOverlay_v217()
    for _ in range(2):
        with self.assertRaises(Exception):
            head.addJQueryLoadingOverlay_v217()
def test_addJQueryLoadingOverlay_v217_example(self):
    """HtmlHead.addJQueryLoadingOverlay_v217 must append exactly the lines
    webLibs.addJQueryLoadingOverlay_v217 writes.

    Fix: files are now closed via context managers even when an assertion
    fails; the manual loop counter is replaced by enumerate.
    """
    # reference output produced directly by webLibs
    with open("./unitTests/temp/test2.txt", "w") as file:
        webLibs.addJQueryLoadingOverlay_v217(file, 5)
    libLines = filerw.getLinesByFilePath("./unitTests/temp/test2.txt")
    # same call routed through HtmlHead, appended after existing content
    with open("./unitTests/temp/test.txt", "w") as file:
        filerw.writeLinesToFileThenAppendNewLine(file, ["line 1", "line 2", "line 3"])
        head = htmlHead.HtmlHead(file, 5)
        head.addJQueryLoadingOverlay_v217()
    lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
    self.assertEqual(len(lines), 3 + len(libLines))
    self.assertEqual(lines[0], "line 1")
    self.assertEqual(lines[1], "line 2")
    self.assertEqual(lines[2], "line 3")
    for i, libLine in enumerate(libLines):
        self.assertEqual(libLine, lines[3 + i])
def test_function_chaining(self):
    # Every setter/add method of HtmlHead must return the instance itself,
    # otherwise this chained call breaks with an AttributeError on None.
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test2.txt",
                                                       ["first line", "second line"])
    filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test3.txt",
                                                       ["first line in this as well", "I also have a second line"])
    file = open("./unitTests/temp/test.txt", "w")
    head = htmlHead.HtmlHead(file, 2)
    head.setTitle("Programming puzzle-pieces") \
        .setFavicon("./webPage/images/favicon.png") \
        .setMetaScreenOptimizedForMobile() \
        .includeFileAsInlineCSS("./unitTests/temp/test2.txt") \
        .addFontAwesome_v611() \
        .addJquery_v360() \
        .addGoogleIcons() \
        .addMaterialize_v110_alpha() \
        .addGoogleFont("?family=Arima+Madurai:wght@500&display=swap") \
        .addJQueryLoadingOverlay_v217() \
        .includeFileAsInlineCSS("./unitTests/temp/test3.txt")
|
CyberDani/personal-roadmap | webPage/generator/defTypes/appDecisionType.py | <reponame>CyberDani/personal-roadmap
from enum import Enum
class AppDecisionType(Enum):
STOP_APP = 0
CONTINUE_RUNNING = 1
|
commodo/pep517 | tests/samples/test-for-issue-104/setup.py | import sys
from setuptools import setup
from os import path, environ, listdir
import json
# Fixture for the pep517 test suite (see directory name "test-for-issue-104"):
# record which files end up next to this setup.py on sys.path during a build,
# dumping them to a JSON file named by the PEP517_ISSUE104_OUTDIR env var.
children = listdir(sys.path[0])
out = path.join(environ['PEP517_ISSUE104_OUTDIR'], 'out.json')
with open(out, 'w') as f:
    json.dump(children, f)
setup()
|
commodo/pep517 | tests/test_meta.py | from __future__ import unicode_literals, absolute_import, division
import re
import pytest
from pep517 import meta
# Marker for tests that build pep517 itself: they are expected to fail
# when the suite runs under Python 2.
pep517_needs_python_3 = pytest.mark.xfail(
    'sys.version_info < (3,)',
    reason="pep517 cannot be built on Python 2",
)
def test_meta_for_this_package():
    # Load metadata of the package in the current directory (pep517 itself)
    # and sanity-check the reported name and version format.
    dist = meta.load('.')
    assert re.match(r'[\d.]+', dist.version)
    assert dist.metadata['Name'] == 'pep517'
def test_classic_package(tmpdir):
    """meta.load must cope with a plain distutils setup.py project."""
    setup_py = 'from distutils.core import setup; setup(name="foo", version="1.0")'
    (tmpdir / 'setup.py').write_text(setup_py, encoding='utf-8')
    dist = meta.load(str(tmpdir))
    assert dist.metadata['Name'] == 'foo'
    assert dist.version == '1.0'
def test_meta_output(capfd):
    """load shouldn't emit any output"""
    meta.load('.')
    out, err = capfd.readouterr()
    assert out == ''
    assert err == ''
|
commodo/pep517 | tests/samples/buildsys_pkgs/buildsys_minimal.py | """Test backend defining only the mandatory hooks.
Don't use this for any real code.
"""
from glob import glob
from os.path import join as pjoin
import tarfile
from zipfile import ZipFile
def build_wheel(wheel_directory, config_settings, metadata_directory=None):
    """Write a minimal wheel for pkg2 into *wheel_directory* and return its name.

    Packs every .py file and every *.dist-info entry from the current
    working directory, in that order.
    """
    whl_file = 'pkg2-0.5-py2.py3-none-any.whl'
    wheel_path = pjoin(wheel_directory, whl_file)
    with ZipFile(wheel_path, 'w') as archive:
        for member in glob('*.py') + glob('*.dist-info/*'):
            archive.write(member)
    return whl_file
def build_sdist(sdist_directory, config_settings):
    """Write a minimal PAX-format sdist for pkg2 and return its file name.

    Archive members are rooted under 'pkg2-0.5/': pyproject.toml first,
    then the .py files, then the .dist-info directories.
    """
    target = 'pkg2-0.5.tar.gz'
    members = ['pyproject.toml'] + glob('*.py') + glob('*.dist-info')
    with tarfile.open(pjoin(sdist_directory, target), 'w:gz',
                      format=tarfile.PAX_FORMAT) as tf:
        for relpath in members:
            tf.add(relpath, arcname='pkg2-0.5/' + relpath)
    return target
|
commodo/pep517 | tests/samples/buildsys_pkgs/buildsys_minimal_editable.py | from buildsys_minimal import build_sdist, build_wheel # noqa
build_editable = build_wheel
|
williamGOC/IFCOM-II-2020- | N-Body/figure.py | <filename>N-Body/figure.py
import matplotlib.pyplot as plt
import numpy as np
import gif
@gif.frame
def plot_bodies(data):
    """Render one animation frame: the three-body trajectories up to now.

    data -- slice of the loaded trajectory array; columns are
            (x1, y1, x2, y2, x3, y3).
    """
    # fix: removed the stray dead `pass` that preceded the body
    plt.figure(figsize=(5, 3), dpi=100)
    plt.title(r'$N = 3$', fontsize=20)
    plt.xlim(xmin=-2, xmax=3.1)
    plt.ylim(ymin=-3, ymax=3.3)
    plt.xlabel(r'$x(t)$', fontsize=15)
    plt.ylabel(r'$y(t)$', fontsize=15)
    plt.plot(data[:,0], data[:,1], label=r'$m_{1}=150$')
    plt.plot(data[:,2], data[:,3], label=r'$m_{2}=200$')
    plt.plot(data[:,4], data[:,5], label=r'$m_{3}=250$')
    plt.legend(fontsize=10)
    plt.tight_layout()
def main():
    """Load the 3-body trajectory file and render it as an animated GIF."""
    # fix: removed the stray dead `pass` and the redundant `frame` temporary
    dataName = "data_N=3.dat"
    name_gif = "./move.gif"
    data = np.loadtxt(dataName, dtype=float)
    N = len(data)
    frames = []
    for i in range(N):
        frames.append(plot_bodies(data[:i]))
    gif.save(frames, name_gif, duration=50)

if __name__ == '__main__':
    main()
|
williamGOC/IFCOM-II-2020- | Simple Runge-Kutta method/lowPassFilter.py | <filename>Simple Runge-Kutta method/lowPassFilter.py
from math import sin,cos,exp,sqrt,log
from numpy import arange
import sys
import matplotlib.pyplot as plt
# ParΓ’metros da exibiΓ§Γ£o dos grΓ‘ficos
plt.rcParams['xtick.labelsize'] = 24
plt.rcParams['ytick.labelsize'] = 24
plt.rcParams['axes.labelsize'] = 28
RC = float(sys.argv[1])
def vinquad(t):
    """Square-wave input of period 1: +1 on even half-cycles, -1 on odd ones."""
    return -1. if int(2 * t) % 2 == 1 else 1.
def fquad(x, t):
    """RHS of the RC low-pass filter ODE driven by the square wave: dx/dt = (vin - x)/RC."""
    v_in = vinquad(t)
    return (v_in - x) / RC
def passo_rk4(f, x, t, h):
    """One classical 4th-order Runge-Kutta step for dx/dt = f(x, t)."""
    s1 = h * f(x, t)
    s2 = h * f(x + 0.5 * s1, t + 0.5 * h)
    s3 = h * f(x + 0.5 * s2, t + 0.5 * h)
    s4 = h * f(x + s3, t + h)
    # weighted average of the four slopes
    return (s1 + 2.0 * (s2 + s3) + s4) / 6.0
def sol_exata(t,a,xa): # valid only for fquad (square-wave input)
    """Exact solution of the RC filter driven by the +/-1 square wave.

    Steps analytically through each completed half-period (0.5 time units),
    then applies the exponential relaxation over the remaining fraction.
    """
    n = int((t-a)/0.5)  # number of completed half-periods before t
    x0 = xa
    t0 = a
    sinal = 1.   # sign of the square-wave input in the current half-period
    vin = sinal
    for i in range(n):
        # RC relaxation towards vin over one full half-period
        xf = vin - (vin - x0)*exp(-0.5/RC)
        t0 = t0 + 0.5
        x0 = xf
        sinal = -sinal
        vin = sinal
    # partial relaxation over the leftover time t - t0
    xf = vin - (vin - x0)*exp(-(t-t0)/RC)
    return xf
def main():
    """Compare the RK4 solution of the RC low-pass filter with the exact one."""
    a = 0.00         # start of the interval
    b = 10.0         # end of the interval
    xa = 0.0         # initial condition, i.e. x(a)
    N_exato = 1000   # number of points for the exact-solution curve
    h_exato = (b-a)/N_exato
    t_exato = arange(a,b,h_exato)
    x_exato = []
    for t in t_exato:  # build the lists for plotting the exact solution
        x_exato.append(sol_exata(t,a,xa))
    N = 1000         # number of numerical-integration steps
    h = (b-a)/N      # integration step size
    xrk4 = xa
    t_rk4 = arange(a,b,h)
    x_rk4 = []
    for t in t_rk4:  # perform the numerical integration
        x_rk4.append(xrk4)
        xrk4 += passo_rk4(fquad,xrk4,t,h)
    plt.figure(figsize=(12,9))
    plt.plot(t_rk4,x_rk4,'r-',t_exato,x_exato,'bo')
    plt.xlabel("$t$")
    plt.ylabel("$V_{out}(t)$")
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
main() |
williamGOC/IFCOM-II-2020- | Molecular_Dynamic_2D_System/molecularDynamic2D.py | <filename>Molecular_Dynamic_2D_System/molecularDynamic2D.py
import gif
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from random import randint
# Global variables os the system
L = 27.386127875258307
NC = 15
RHO = 0.3
EMPTY = -1
N = 225
class particles(object):
    """N Lennard-Jones particles in a periodic L x L box, integrated with
    velocity Verlet. Calling the instance advances one time step."""

    def __init__(self, force, passTime):
        super(particles, self).__init__()
        self.force = force        # callable: (N, 2) positions -> (N, 2) forces
        self.passTime = passTime  # integration time step
        self.counter = 0          # number of steps taken so far
        self.position = np.zeros((N, 2), dtype=float)
        self.momentum = np.zeros((N, 2), dtype=float)
        self.acelerat = np.zeros((N, 2), dtype=float)
        for i in range(N):
            # Place particles on an NC x NC square lattice. `i // NC` is the
            # row index — the original `i / NC` was a Python-2 leftover that,
            # under Python 3's true division, smeared the lattice diagonally.
            self.position[i][0] = (i % NC) * L / (NC + 1)
            self.position[i][1] = (i // NC) * L / (NC + 1)
            self.momentum[i][0] = 1.1 if (randint(1, 10) % 2 == 0) else -1.1
        self.acelerat = self.force(self.position)

    def __call__(self):
        """Advance the system one step, then re-apply periodic boundaries."""
        self.verletIterator()
        self.peridCondition()

    def __del__(self):
        # NOTE(review): explicitly deleting attributes in __del__ is
        # unnecessary in Python; kept for parity with the original.
        del self.passTime
        del self.counter
        del self.force
        del self.position
        del self.acelerat
        del self.momentum

    def verletIterator(self):
        """One velocity-Verlet step: half kick, drift, force update, half kick."""
        self.momentum += self.acelerat * self.passTime / 2
        self.position += self.momentum * self.passTime
        self.acelerat = self.force(self.position)
        self.momentum += self.acelerat * self.passTime / 2
        self.counter += 1

    def peridCondition(self):
        """Wrap every coordinate back into the [0, L) simulation box."""
        size = 1.0 / L
        self.position -= np.floor(self.position * size) * L

    @gif.frame
    def printerSystem(self):
        """Render the current particle positions as one animation frame."""
        plt.figure(figsize=(7, 5), dpi=100)
        plt.title('time = {:.3f}'.format(self.passTime * self.counter))
        plt.xlim(xmin=1, xmax=L)
        plt.ylim(ymin=1, ymax=L)
        plt.axis('off')
        plt.plot(self.position[:,0], self.position[:,1], linestyle='' , marker='o', markersize=2)
def minimalImage(r_i, r_j):
    """Displacement r_i - r_j under the minimum-image convention for box size L."""
    # fix: removed the stray dead `pass` after the def line
    r_ij = r_i - r_j
    return r_ij - L * np.round(r_ij / L)
def d_ij(r_i, r_j):
    """Minimum-image distance between positions r_i and r_j."""
    # fix: removed the stray dead `pass` after the def line
    return np.linalg.norm(minimalImage(r_i, r_j))
def f_ij(r_i, r_j):
    """Lennard-Jones force on particle i due to particle j (reduced units)."""
    # fix: removed the stray dead `pass` after the def line
    d = d_ij(r_i, r_j)
    # scalar prefactor of the LJ force: 48 * (d^-14 - d^-8 / 2)
    v_ij = 48 * ((d ** -14) - (d ** -8) / 2)
    return v_ij * minimalImage(r_i, r_j)
def f(position):
    """Total Lennard-Jones force on every particle.

    Uses Newton's third law over each unordered pair; the pair force is now
    computed once and reused (the original evaluated f_ij twice per pair).
    """
    force = np.zeros((N, 2), dtype=float)
    for i in range(N):
        for j in range(i + 1, N):
            pair_force = f_ij(position[i], position[j])
            force[i] += pair_force
            force[j] -= pair_force
    return force
def main():
    """Run the MD simulation for 1000 steps and save an animated GIF."""
    # fix: removed the stray dead `pass` and the redundant `frame` temporary
    sys = particles(f, 0.005)
    name_gif = "./move.gif"
    frames = []
    for i in range(1000):
        frames.append(sys.printerSystem())
        sys()
    gif.save(frames, name_gif, duration=200)
if __name__ == '__main__':
main() |
williamGOC/IFCOM-II-2020- | N-Body/pybody.py | """
Author: <NAME>
Date: April 19, 2020, NeuquΓ©, Argentina
module body: This module has implemented a series of functions and objects
that will be useful when solving the problem of the N bodies.
"""
# necessary modules
import numpy as np
from copy import copy
class body(object):
    """A point mass in the plane: position rVec, velocity vVec (starts at rest)."""

    def __init__(self, mass, rVec):
        super(body, self).__init__()
        self.mass = mass
        self.rVec = rVec
        self.vVec = np.array([0, 0], dtype=float)

    def __str__(self):
        return "body object: M = {}, R = ({}, {}), V = ({}, {})".format(
            self.mass, self.rVec[0], self.rVec[1], self.vVec[0], self.vVec[1])

    def setV(self, newV):
        """Rebind the velocity vector."""
        self.vVec = newV

    def setR(self, newR):
        """Rebind the position vector."""
        self.rVec = newR

    def gravitationForce(self, P):
        """Acceleration on self due to body P with G = 1: m_P (r_P - r) / |r_P - r|^3."""
        separation = P.rVec - self.rVec
        return (P.mass * separation) / np.linalg.norm(separation) ** 3
|
williamGOC/IFCOM-II-2020- | EDO_2_ORDER/EDO_2.py | import numpy as np
class EDO_Solver(object):
    """Single-step integrators for the second-order ODE x'' = f(x).

    State lives in the public attributes x, v, a and counter; each
    *Iteration method advances exactly one time step dt.
    """

    def __init__(self, f, x, v, dt):
        super(EDO_Solver, self).__init__()
        self.f = f
        self.x = x
        self.v = v
        self.dt = dt
        self.a = self.f(self.x)
        self.counter = 0

    def eulerIteration(self):
        """Explicit Euler step (first-order accurate)."""
        self.x += self.dt * self.v
        self.v += self.dt * self.a
        self.a = self.f(self.x)
        self.counter += 1

    def rk2Iteration(self):
        """Midpoint (RK2) step: evaluate at the half step, then advance."""
        mid_x = self.x + (self.dt * self.v)/2
        mid_v = self.v + (self.dt * self.a)/2
        self.x += self.dt * mid_v
        self.v += self.dt * self.f(mid_x)
        self.a = self.f(self.x)
        self.counter += 1

    def verletIteration(self):
        """Velocity-Verlet step: half kick, drift, force update, half kick."""
        self.v += (self.dt * self.a)/2
        self.x += self.dt * self.v
        self.a = self.f(self.x)
        self.v += (self.dt * self.a)/2
        self.counter += 1

    def totalEnergy(self):
        """Energy of the unit-mass, unit-frequency oscillator: (v^2 + x^2)/2."""
        return (self.v**2 + self.x**2)/2
def force(x):
    """Hooke restoring force with unit stiffness: F(x) = -x."""
    return -1 * x
def main():
    # Simple harmonic oscillator x'' = -x integrated with velocity Verlet;
    # prints time, position, velocity and total energy per step (tab-separated).
    x_0 = 1.0   # initial position
    v_0 = 1.0   # initial velocity
    dt = 0.005  # time step
    T = 10.0    # total simulated time
    s = EDO_Solver(force, x_0, v_0, dt)
    for i in range(int(T/dt)+1):
        print("{}\t{}\t{}\t{}".format(s.counter*dt, s.x, s.v, s.totalEnergy()))
        s.verletIteration()
if __name__ == '__main__':
main() |
williamGOC/IFCOM-II-2020- | N-Body/nBody.py | import numpy as np
import matplotlib.pyplot as plt
from pybody import *
def totalForce(element, system):
    """Total gravitational force acting on *element* from every other body.

    Keyword Arguments:
    system  ---> list object with each entry a body object
    element ---> a body object (must be one of the entries in system)

    Return ---> 1 x 2 numpy array.
    """
    force = np.array([0, 0], dtype=float)
    # Skip `element` itself by identity instead of copying the whole list
    # and calling list.remove (which compared by equality and cost O(n)
    # extra work per call).
    for other in system:
        if other is element:
            continue
        force += element.gravitationForce(other)
    return force
def farceSystem(system, K, nbodies):
    """Return a shifted shallow copy of the system; the original is untouched.

    Keyword Arguments:
    system  ---> list object with each entry a body object
    K       ---> (2*nbodies, 2) numpy array of position/velocity offsets
    nbodies ---> int (number of bodies)

    Return: list object with a copy of each body.
    """
    # fix: removed the stray dead `pass` after the docstring.
    # copy() is shallow, but updateSystem rebinds rVec/vVec via setR/setV
    # rather than mutating the arrays in place, so the originals stay safe.
    copySystem = [copy(system[i]) for i in range(nbodies)]
    updateSystem(copySystem, K, nbodies)
    return copySystem
def updateSystem(system, K, nbodies):
    """Shift the generalized coordinates of *system* in place by *K*.

    Keyword Arguments:
    system  ---> list object with each entry a body object
    K       ---> (2*nbodies, 2) numpy array; first nbodies rows are position
                 offsets, the last nbodies rows are velocity offsets
    nbodies ---> int (number of bodies)
    """
    # fix: removed the stray dead `pass` after the docstring
    for i in range(nbodies):
        system[i].setR(system[i].rVec + K[i])
        system[i].setV(system[i].vVec + K[nbodies + i])
def F(system):
    """Generalized force vector of the whole system.

    Keyword Arguments:
    system ---> list object with each entry a body object.

    Return: (2N, 2) numpy array — the first N rows are the bodies'
    velocities, the last N rows their gravitational accelerations.
    """
    # fix: removed the stray dead `pass` after the docstring
    GenericV = [element.vVec for element in system]
    GenericF = [totalForce(element, system) for element in system]
    return np.array(GenericV + GenericF)
def stepRK4(F, system, h, nbodies):
    """One 4th-order Runge-Kutta step of size h for the whole system.

    Keyword Arguments:
    F       ---> callable returning the (2N, 2) generalized force array
    system  ---> list object with each entry a body object
    h       ---> float step size

    Return: (2N, 2) numpy array — the correction for all generalized coordinates.
    """
    # fix: removed the stray dead `pass` after the docstring
    K1 = h * F(system)
    K2 = h * F(farceSystem(system, K1/2, nbodies))
    K3 = h * F(farceSystem(system, K2/2, nbodies))
    K4 = h * F(farceSystem(system, K3, nbodies))
    return (K1 + 2 * (K2 + K3) + K4)/6
def adaptativePass(F, system, h, nbodies, prec=1e-6):
    """Adaptive RK4 step: shrink h until the local error estimate meets prec.

    Compares two consecutive steps of size h against one step of size 2h.

    Keyword Arguments:
    F       ---> callable returning the (2N, 2) generalized force array
    system  ---> list object with each entry a body object
    h       ---> float trial step size

    Return: tuple (dX, step_taken, suggested_next_step).
    """
    # fix: removed the stray dead `pass` after the docstring
    rate = 1.0 + 1e-10
    while rate >= 1.0 + 1e-10:
        h /= rate
        # two consecutive steps of size h ...
        dX12 = stepRK4(F, system, h, nbodies)
        dX1 = dX12 + stepRK4(F, farceSystem(system, dX12, nbodies), h, nbodies)
        # ... versus a single step of size 2h
        dX2 = stepRK4(F, system, 2*h, nbodies)
        # RK4 local error estimate; only position rows enter the error norm
        epsilon = (dX2 - dX1)/30
        error = max([ np.linalg.norm(np.array(epsilon[k])) for k in range(nbodies)])
        rate = (error/(h*prec))**0.25
    h_approx = min(h/(rate+1e-10), 2*h)
    # NOTE(review): local extrapolation is usually applied to the more
    # accurate two-half-step result dX1; confirm the choice of dX2 here.
    dX = dX2 + (dX2-dX1)/15
    return dX, 2*h, h_approx
def main():
    """Integrate four unit-mass bodies placed on a square with adaptive RK4
    and print their positions (tab-separated) after every accepted step."""
    t_0 = 0.0 # Start of time interval.
    t_N = 2.0 # End of time interval.
    t = t_0   # Initialization time.
    h = 1e-3  # Step's size.
    h_now = h # Initialization step's size.
    # Creation of the bodies of our system.
    A = body(1, np.array([-1, 1], dtype=float))
    B = body(1, np.array([-1, -1], dtype=float))
    C = body(1, np.array([1, -1],dtype=float))
    D = body(1, np.array([1, 1],dtype=float))
    system = [A, B, C, D]
    N = len(system)
    # upgrade steps.
    while t <= t_N:
        dX, h_now, h_approx = adaptativePass(F, system, h, N)
        t += h_now
        updateSystem(system, dX, N)
        h = h_approx
        # printer system
        printer = ''
        for element in system:
            printer += '{}\t{}\t'.format(element.rVec[0], element.rVec[1])
        print(printer)
        printer = ''
if __name__ == '__main__':
main() |
williamGOC/IFCOM-II-2020- | Simple Runge-Kutta method/inftyIntegrator.py | from math import sin,cos,exp,sqrt,log,tanh
from numpy import arange
import matplotlib.pyplot as plt
# ParΓ’metros da exibiΓ§Γ£o dos grΓ‘ficos
plt.rcParams['xtick.labelsize'] = 24
plt.rcParams['ytick.labelsize'] = 24
plt.rcParams['axes.labelsize'] = 28
gama, omega = 1.0, 10.0
def g(x, u):
    """Integrand 1/(x^2 + t^2) rewritten in the compactified variable u.

    The substitution t = atanh(u) maps u in (-1, 1) onto t in (-inf, inf);
    the trailing factor 1/(1 - u^2) is the Jacobian dt/du.
    """
    t = 0.5 * log((1.0 + u) / (1.0 - u))
    value = 1.0 / (x**2 + t**2)
    return value / (1.0 - u**2)
def passo_rk4(f, x, t, h):
    """One classical 4th-order Runge-Kutta step for dx/dt = f(x, t)."""
    s1 = h * f(x, t)
    s2 = h * f(x + 0.5 * s1, t + 0.5 * h)
    s3 = h * f(x + 0.5 * s2, t + 0.5 * h)
    s4 = h * f(x + s3, t + h)
    # weighted average of the four slopes
    return (s1 + 2.0 * (s2 + s3) + s4) / 6.0
def main():
    """Integrate dx/du = g(x, u) over u in [0, tanh(8)] with RK4 and plot x(t)."""
    a = 0.        # start of the interval (in the compactified variable u)
    b = tanh(8.)  # end of the interval
    xa = 1.0      # initial condition, i.e. x(a)
    N = 1000000   # number of numerical-integration steps
    h = (b-a)/N   # integration step size
    xrk4 = xa
    u_rk4 = []
    t_rk4 = []
    x_rk4 = []
    u = a
    t = 0.5*log((1.0+u)/(1.0-u))  # t = atanh(u), the original variable
    u_rk4.append(u)
    t_rk4.append(t)
    x_rk4.append(xrk4)
    for i in range(1,N+1):  # perform the numerical integration
        xrk4 += passo_rk4(g,xrk4,u,h)
        u = u + h
        t = 0.5*log((1.0+u)/(1.0-u))
        u_rk4.append(u)
        t_rk4.append(t)
        x_rk4.append(xrk4)
    plt.figure(figsize=(12,9))
    plt.xlim(0.,8.)
    plt.plot(t_rk4,x_rk4)
    plt.xlabel("t")
    plt.ylabel("x(t)")
    plt.show()

if __name__ == '__main__':
    main()
|
sentashani/stolgo | lib/stolgo/nse_data.py | <gh_stars>0
import os
import requests
import concurrent.futures
import io
from datetime import datetime,timedelta
import pandas as pd
from pandas import ExcelWriter
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup
from stolgo.nse_urls import NseUrls
from stolgo.helper import get_formated_date
from stolgo.request import RequestUrl
#default params for url connection
DEFAULT_TIMEOUT = 5 # seconds
MAX_RETRIES = 2
class NseData:
    """Fetches NSE (National Stock Exchange of India) option-chain,
    participant-wise open-interest and historical stock data by scraping
    the public NSE endpoints.

    Fixes:
    - __init__ accepted timeout/max_retries but never forwarded them to
      RequestUrl, so caller settings were silently ignored.
    - get_part_oi_df and get_data compared shape[0] < periods even when
      periods was None, which raises TypeError under Python 3.
    """

    def __init__(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES):
        self.__nse_urls = NseUrls()
        self.__headers = self.__nse_urls.header
        # create request session; forward the caller's settings (previously dropped)
        self.__request = RequestUrl(timeout=timeout, max_retries=max_retries)

    def get_oc_exp_dates(self, symbol):
        """get current available expiry dates

        Arguments:
            symbol {[string]} -- symbol name
        """
        try:
            base_oc_url = self.__nse_urls.get_option_chain_url(symbol)
            page = self.__request.get(base_oc_url, headers=self.__headers)
            # the expiry dates live in the <select id="date"> dropdown;
            # the first <option> is a placeholder, hence the [1:] slice
            soup = BeautifulSoup(page.text, 'lxml')
            table = soup.find("select", {"id": "date"})
            expiry_out = table.find_all("option")
            expiry_dates = [exp_date.get("value") for exp_date in expiry_out][1:]
            return expiry_dates
        except Exception as err:
            raise Exception("something went wrong while reading nse URL :", str(err))

    def get_option_chain_df(self, symbol, expiry_date, dayfirst=False):
        """ This function fetches option chain data and returns
        in the form of pandas data frame

        Arguments:
            symbol {[string]} -- [stock symbol]
            expiry_date {[string]} -- [expiry date]
            dayfirst{[bool]} -- [to consider date first, european style DD/MM/YYYY]
        """
        try:
            oc_url = self.__nse_urls.get_option_chain_url(symbol, expiry_date, dayfirst)
            # If the response was successful, no Exception will be raised
            oc_page = self.__request.get(oc_url, headers=self.__headers)
        except Exception as err:
            raise Exception("Error occured while connecting NSE :", str(err))
        else:
            try:
                # table index 1 on the page is the option chain itself
                dfs = pd.read_html(oc_page.text)
                return dfs[1]
            except Exception as err:
                raise Exception("Error occured while reading html :", str(err))

    def __get_file_path(self, file_name, file_path=None, is_use_default_name=True):
        """Build an .xlsx output path; default name appends the current
        day/month/hour/minute so repeated exports do not overwrite."""
        try:
            if not file_path:
                file_path = os.getcwd()
            if os.path.isfile(file_path):
                if (not is_use_default_name):
                    return file_path
                # if need to use default file path, we get parent path
                else:
                    file_path = os.path.dirname(file_path)
            # datetime object containing current date and time
            now = datetime.now()
            dt_string = now.strftime("%d_%B_%H_%M")
            file_name = file_name + "_" + dt_string + ".xlsx"
            excel_path = os.path.join(file_path, file_name)
            return excel_path
        except Exception as err:
            # NOTE(review): falls through to an implicit None return on error
            print("Error while naming file. Error: ", str(err))

    def get_option_chain_excel(self, symbol, expiry_date, dayfirst=False, file_path=None, is_use_default_name=True):
        """This fucntion fetches option chain data and returns
        in the form of excel (.xlsx)

        Arguments:
            symbol {[string]} -- [stock symbol]
            expiry_date {[string]} -- [expiry date]
            dayfirst{[bool]} -- [to consider date first, european style DD/MM/YYYY]

        Keyword Arguments:
            file_path {[string]} -- [filepath or folder path] (default: {None})
            is_use_default_name {bool} -- [to set file name ] (default: {True})
        """
        try:
            df = self.get_option_chain_df(symbol, expiry_date, dayfirst)
            file_name = symbol + "_" + expiry_date
            excel_path = self.__get_file_path(file_name, file_path, is_use_default_name)
            writer = ExcelWriter(excel_path)
            df.to_excel(writer, file_name)
            writer.save()
        except Exception as err:
            raise Exception("Error occured while getting excel :", str(err))

    def __join_part_oi_dfs(self, df_join, df_joiner):
        """Append each participant frame of df_joiner onto df_join, in place.

        Arguments:
            df_join {[dict]} -- [Dictionary of participants]
            df_joiner {[dict]} -- [Dictionary of participants]
        """
        for client in df_join:
            df_join[client] = self.__join_dfs(df_join[client], df_joiner[client]).sort_index()

    def __join_dfs(self, join, joiner):
        """Append joiner to join and return the combined frame.

        Arguments:
            join {[dataframe]} -- [will get appended]
            joiner {[dataframe]} -- [df which will be appended in join df]
        """
        return join.append(joiner)

    def get_part_oi_df(self, start=None, end=None, periods=None, dayfirst=False, workers=None):
        """Return dictionary of participants containing data frames

        Keyword Arguments:
            start {[string]} -- [start time ] (default: {None})
            end {[string]} -- [end time] (default: {None})
            periods {[interger]} -- [number of days] (default: {None})
            dayfirst {bool} -- [True if date in DD/MM/YYY format] (default: {False})
            workers {[integer]} -- [Number of threads for requesting nse] (default: {None})

        Returns:
            [dictionary] -- [dict of participants containing dataframes]
        """
        try:
            # format date just in case
            if start:
                start = get_formated_date(start, dayfirst=dayfirst)
            if end:
                end = get_formated_date(end, dayfirst=dayfirst)
            # get urls for these (business) days
            dates = pd.date_range(start=start, end=end, periods=periods, freq='B')
            url_date = [(self.__nse_urls.get_participant_oi_url(date), date) for date in dates]
            oi_clm = self.__nse_urls.part_oi_clm
            # lets preinitialize, better readability
            oi_dfs = {"Client": pd.DataFrame(columns=oi_clm, index=dates),
                      "DII": pd.DataFrame(columns=oi_clm, index=dates),
                      "FII": pd.DataFrame(columns=oi_clm, index=dates),
                      "Pro": pd.DataFrame(columns=oi_clm, index=dates),
                      "TOTAL": pd.DataFrame(columns=oi_clm, index=dates)
                      }
            if not workers:
                workers = os.cpu_count() * 2
            with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
                responses = {executor.submit(self.__request.get, url, self.__headers): (url, date) for url, date in url_date}
                for res in concurrent.futures.as_completed(responses):
                    url, date = responses[res]
                    try:
                        csv = res.result()
                    except Exception as exc:
                        # might be holiday — no file for that date
                        pass
                    else:
                        df = pd.read_csv(io.StringIO(csv.content.decode('utf-8')))
                        # drop the first header
                        df_header = df.iloc[0]
                        # is there any implace way?
                        df = df[1:]
                        df.columns = df_header
                        df.set_index('Client Type', inplace=True)
                        # lets us create data frame for all client types
                        oi_dfs['Client'].loc[date] = df.loc['Client']
                        oi_dfs['FII'].loc[date] = df.loc['FII']
                        oi_dfs['DII'].loc[date] = df.loc['DII']
                        oi_dfs['Pro'].loc[date] = df.loc['Pro']
                        oi_dfs['TOTAL'].loc[date] = df.loc['TOTAL']
            if not oi_dfs['Client'].empty:
                # remove nan rows (holidays that produced no data)
                for client in oi_dfs:
                    oi_dfs[client].dropna(inplace=True)
            # if holiday occured in business day, lets retrive more data equivalent to holidays.
            # FIX: guard `periods` — comparing shape[0] < None raised TypeError.
            if periods and oi_dfs['Client'].shape[0] < periods:
                new_periods = periods - oi_dfs['Client'].shape[0]
                try:
                    # if only start, find till today
                    if start and (not end):
                        s_from = oi_dfs['Client'].index[-1] + timedelta(1)
                        e_till = None
                    # if not start, can go to past
                    elif (end and (not start)):
                        s_from = None
                        e_till = oi_dfs['Client'].index[0] - timedelta(1)
                    # if start and end, no need to change
                    else:
                        return oi_dfs
                except IndexError as err:
                    raise Exception("NSE Access error.size down/clean cookies to resolve the issue.")
                except Exception as exc:
                    raise Exception("participant OI error: ", str(exc))
                oi_dfs_new = self.get_part_oi_df(start=s_from, end=e_till, periods=new_periods)
                self.__join_part_oi_dfs(oi_dfs, oi_dfs_new)
            return oi_dfs
        except Exception as err:
            raise Exception("Error occured while getting part_oi :", str(err))

    def get_data(self, symbol, series="EQ", start=None, end=None, periods=None, dayfirst=False):
        """get_data API retuns stock data

        Arguments:
            symbol {[string]} -- [stock symbol as per nse]

        Keyword Arguments:
            series {str} -- [segment type] (default: {"EQ"})
            start {[string]} -- [start date] (default: {None})
            end {[string]} -- [end date] (default: {None})
            periods {[integer]} -- [number of days] (default: {None})
            dayfirst {bool} -- [True if date in DD/MM/YYY format, date first then month] (default: {False})
        """
        try:
            data_url = self.__nse_urls.get_stock_data_url\
                (
                    symbol.upper(), series=series, start=start,
                    end=end, periods=periods, dayfirst=dayfirst
                )
            csv = self.__request.get(data_url, headers=self.__headers)
            dfs = pd.read_csv(io.StringIO(csv.content.decode('utf-8')))
            # NOTE: NSE's CSV column name really carries a trailing space
            dfs.set_index('Date ', inplace=True)
            # Converting the index as date
            dfs.index = pd.to_datetime(dfs.index)
            # FIX: guard `periods` — comparing shape[0] < None raised TypeError.
            if periods and dfs.shape[0] < periods:
                new_periods = periods - dfs.shape[0]
                try:
                    # if only start, find till today
                    if start and (not end):
                        s_from = dfs.index[0] + timedelta(1)
                        e_till = None
                    # if not start, can go to past
                    elif (end and (not start)):
                        s_from = None
                        e_till = dfs.index[-1] - timedelta(1)
                    # if start and end, no need to change
                    else:
                        return dfs
                except IndexError as err:
                    raise Exception("NSE Access error.")
                except Exception as exc:
                    raise Exception("Stock data error: ", str(exc))
                dfs_new = self.get_data(symbol, series, start=s_from, end=e_till, periods=new_periods)
                dfs = self.__join_dfs(dfs, dfs_new).sort_index(ascending=False)
            return dfs
        except Exception as err:
            raise Exception("Error occured while stock data :", str(err))
sentashani/stolgo | lib/stolgo/request.py | import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
DEFAULT_TIMEOUT = 5 # seconds
MAX_RETRIES = 2
#class TimeoutHTTPAdapter credit : https://github.com/psf/requests/issues/3070#issuecomment-205070203
class TimeoutHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that applies a default timeout to every request unless
    the caller passes an explicit one."""

    def __init__(self, *args, **kwargs):
        self.timeout = DEFAULT_TIMEOUT
        if "timeout" in kwargs:
            # pop our keyword before delegating — HTTPAdapter doesn't accept it
            self.timeout = kwargs["timeout"]
            del kwargs["timeout"]
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        # inject the default timeout only when the caller supplied none
        timeout = kwargs.get("timeout")
        if timeout is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
class RequestUrl:
    """Thin wrapper around a requests.Session configured with retries and a
    default timeout for both http and https."""

    def __init__(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES):
        self.session = self.get_session(timeout=timeout, max_retries=max_retries)

    def get_session(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES):
        """Build a Session mounting a TimeoutHTTPAdapter on http/https."""
        # retry idempotent requests on transient server/rate-limit errors
        retry = Retry(
            total=max_retries,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        # FIX: the Retry policy was built but never used — the adapter got a
        # bare int, silently dropping backoff_factor and status_forcelist.
        # HTTPAdapter accepts a Retry instance for max_retries.
        adapter = TimeoutHTTPAdapter(max_retries=retry, timeout=timeout)
        session = requests.Session()
        session.mount("https://", adapter)
        session.mount("http://", adapter)
        return session

    def get(self, url, headers=None):
        """GET *url*; raise a wrapped Exception on any HTTP error status."""
        try:
            page = self.session.get(url, headers=headers)
            # If the response was successful, no Exception will be raised
            page.raise_for_status()
            return page
        except requests.HTTPError as http_err:
            raise Exception("HTTP error occurred while fetching url :", str(http_err.response.content))
sentashani/stolgo | lib/stolgo/nse_urls.py | <filename>lib/stolgo/nse_urls.py<gh_stars>0
from datetime import datetime,date
import pandas as pd
from stolgo.helper import get_formated_date,get_formated_dateframe
class NseUrls:
    """Builds the NSE endpoint URLs used by NseData (option chain,
    participant open interest, historical stock data)."""

    def __init__(self):
        self.__OPTION_CHAIN_BASE_URL = r"https://www1.nseindia.com/live_market/dynaContent/"\
            + r"live_watch/option_chain/optionKeys.jsp?symbol="
        # In Encoded URI (percent-encoded archive query)
        self.__PARTICIPANT_OI_PRE_URL = r"https://www.nseindia.com/api/reports?archives=%5B%7B%22name"\
            + r"%22%3A%22F%26O%20-%20Participant%20wise%20Trading%20Volumes(csv)"\
            + r"%22%2C%22type%22%3A%22archives%22%2C%22category%22%3A%22derivatives"\
            + r"%22%2C%22section%22%3A%22equity%22%7D%5D&date="
        self.__PARTICIPANT_OI_POST_URL = r"&type=equity&mode=single"
        # Dictionary contains nse date formats for urls
        self.nse_date_formats = {
            "opt_chain": '%d%b%Y',
            "part_oi": '%d-%b-%Y',
            "stock_data": '%d-%m-%Y'
        }
        # browser like header to avoid error 403
        self.header = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"}
        # participant wise OI column names
        # (several literals intentionally carry a trailing '\t' — they must
        # match the CSV header NSE serves)
        self.part_oi_clm = ['Future Index Long', 'Future Index Short', 'Future Stock Long',
                            'Future Stock Short\t', 'Option Index Call Long',
                            'Option Index Put Long', 'Option Index Call Short',
                            'Option Index Put Short', 'Option Stock Call Long',
                            'Option Stock Put Long', 'Option Stock Call Short',
                            'Option Stock Put Short', 'Total Long Contracts\t',
                            'Total Short Contracts']
        # stock data
        self.__HIST_DATA_PRE_URL = r"https://www.nseindia.com/api/historical/cm/equity?symbol="

    def get_option_chain_url(self, symbol, expiry_date=None, dayfirst=False):
        """URL of the option chain page; without expiry_date NSE serves the
        nearest expiry."""
        try:
            if expiry_date:
                expiry_date = get_formated_date(expiry_date, self.nse_date_formats["opt_chain"], dayfirst).upper()
                complete_url = self.__OPTION_CHAIN_BASE_URL + symbol + "&date=" + expiry_date
                return complete_url
            else:
                return self.__OPTION_CHAIN_BASE_URL + symbol
        except Exception as err:
            raise Exception("Error occured while getting OC url, Error: ", str(err))

    def get_participant_oi_url(self, date, dayfirst=False):
        """URL of the participant-wise trading volumes CSV for *date*."""
        try:
            date = get_formated_date(date, format=self.nse_date_formats["part_oi"], dayfirst=dayfirst)
            url = self.__PARTICIPANT_OI_PRE_URL + date + self.__PARTICIPANT_OI_POST_URL
            return url
        except Exception as err:
            raise Exception("Error occured while getting participant OI. ", str(err))

    def get_stock_data_url(self, symbol, series="EQ", start=None, end=None, periods=None, dayfirst=False):
        """URL of the historical equity data CSV for the requested window."""
        try:
            # Step 1: format date
            if start:
                start = get_formated_dateframe(start, format=self.nse_date_formats["stock_data"], dayfirst=dayfirst)
            if end:
                end = get_formated_dateframe(end, format=self.nse_date_formats["stock_data"], dayfirst=dayfirst)
            # Step 2: date range with periods
            # if only start, find till today
            if start and (not end):
                s_from = start
                if periods:
                    e_till = s_from + pd.offsets.BDay(periods)
                else:
                    e_till = get_formated_dateframe(format=self.nse_date_formats["stock_data"])
            # if not start, can go to past
            elif (end and (not start)):
                e_till = end
                if periods:
                    s_from = e_till - pd.offsets.BDay(periods)
                else:
                    # last one year data
                    s_from = e_till - pd.offsets.BDay(255)
            # if start and end, no need to change
            elif (start and end):
                s_from = start
                e_till = end
            # if no start/end and periods given, we get past data of periods
            else:
                e_till = get_formated_dateframe(format=self.nse_date_formats["stock_data"])
                if (periods):
                    s_from = e_till - pd.offsets.BDay(periods)
                else:
                    # last one year data
                    s_from = e_till - pd.offsets.BDay(255)
            # step3: Build url
            s_from = s_from.strftime(self.nse_date_formats["stock_data"])
            e_till = e_till.strftime(self.nse_date_formats["stock_data"])
            url = self.__HIST_DATA_PRE_URL + symbol + r"&series=[%22" + series+\
                r"%22]&from=" + s_from + r"&to=" + e_till + r"&csv=true"
            return url
        except Exception as err:
            raise Exception("Error occured while getting stock data URL. ", str(err))
sentashani/stolgo | lib/stolgo/helper.py | <reponame>sentashani/stolgo
from datetime import date as dt
import pandas as pd
import requests
def get_formated_date(date=None, format=None, dayfirst=False):
    """Format *date* (a parseable string, or None for today) as a string.

    Defaults to today's date and the '%m/%d/%Y' pattern when the
    corresponding argument is omitted.
    """
    try:
        target = date if date else dt.today()
        parsed = pd.to_datetime(target, dayfirst=dayfirst)
        pattern = format if format else '%m/%d/%Y'
        return parsed.strftime(pattern)
    except Exception as err:
        raise Exception("Error occured while formatting date, Error: ",str(err))
def get_formated_dateframe(date=None, format=None, dayfirst=False):
    """Like get_formated_date, but return a pandas Timestamp instead of a string."""
    formatted = get_formated_date(date, format, dayfirst)
    return pd.to_datetime(formatted, format=format)
|
sentashani/stolgo | tests/test_nse_option_chain.py | <reponame>sentashani/stolgo
from stolgo.nse_data import NseData
def main():
    # Smoke test against the live NSE site: export the BANKNIFTY option
    # chain for the 30 Apr 2020 expiry to an .xlsx in the current directory.
    nse_data = NseData()
    nse_data.get_option_chain_excel('BANKNIFTY', '30APR2020')
if __name__ == "__main__":
    main()
|
Emgalai/mpd_queue_random_album | mpdrandom/mpdrandom.py | <filename>mpdrandom/mpdrandom.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import json
import random
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
import mpd
# Default Server info, change these values to match yours.
HOST = "192.168.88.241"
PORT = "6600"
PASSWORD = <PASSWORD>
__version__ = "1.0.0"
__author__ = "IbeeX"
def queue_random_album(client: "mpd.MPDClient", cache: List[str]) -> Optional[str]:
    """Pick a random album that is not in *cache* and append it to the queue.

    :param client: connected MPD client.
    :param cache: album names queued recently (to be skipped).
    :returns: the queued album name, or None when the library is empty,
              every album was queued recently, or the chosen name is blank.
    """
    albums: List[str] = client.list("album")
    # Filter up front instead of re-drawing: the original retry loop could
    # spin forever once every album was in the cache, and random.choice
    # raised IndexError on an empty library.
    candidates = [name for name in albums if name not in cache]
    if not candidates:
        return None
    album_name = random.choice(candidates)
    if not album_name:
        # mpd can report an empty album tag; treat it as "nothing to queue".
        return None
    client.findadd("album", album_name)
    album = client.find("album", album_name)[0]
    if "albumartist" in album:
        print(f"{album['albumartist']}: {album['album']}, from {len(albums)} albums.")
    else:
        print(f"{album['artist']}: {album['album']}, from {len(albums)} albums.")
    return album_name
def enforce_cache_size(size: int, cache: List[str]) -> List[str]:
    """Trim *cache* to at most *size* entries, dropping the oldest (front) ones."""
    overflow = len(cache) - size
    if overflow <= 0:
        return cache
    return cache[overflow:]
def get_cache_size(no_albums: int, percent: int = 10) -> int:
    """Size of the recently-played cache: *percent* of the album count.

    Libraries of 0 or 1 albums get no cache; any larger library gets at
    least 1 entry, capped at 100.
    """
    if no_albums <= 1:
        return 0
    size = int(no_albums / 100 * percent)
    if size == 0:
        return 1
    return min(size, 100)
def get_cache_file() -> Path:
    """Return the cache file path, creating ~/.cache/mpdrandom if missing."""
    directory = Path.home() / ".cache" / "mpdrandom"
    directory.mkdir(parents=True, exist_ok=True)
    return directory / "cache.json"
def load_cache(cache_file: Path) -> List[str]:
    """Read the JSON album cache; a missing file means an empty cache."""
    if not cache_file.is_file():
        return []
    with cache_file.open() as handle:
        return json.loads(handle.read())
def save_cache(cache_file, cache):
    """Write *cache* to *cache_file* as pretty-printed, key-sorted JSON."""
    serialized = json.dumps(cache, indent=4, sort_keys=True)
    with cache_file.open(mode="w") as handle:
        handle.write(serialized)
def enqueue_current(client: mpd.MPDClient) -> None:
    # Queue every track of the album the currently playing song belongs to.
    # NOTE(review): raises KeyError if the current song has no "album" tag
    # (or nothing is playing) — confirm whether that case can occur.
    song: Dict[str, Any] = client.currentsong()
    client.findadd("album", song["album"])
def enqueue_date(client: mpd.MPDClient, search_date: int) -> None:
    # Queue every track whose "date" tag matches the given year.
    client.searchadd("date", str(search_date))
def main(args):
    """Connect to mpd and dispatch the chosen sub-command.

    args carries host/port/password plus the sub-command in args.options
    ('current', 'search' or 'random') and its parameters (see cli()).
    """
    client = mpd.MPDClient()
    client.timeout = 10
    client.idletimeout = None
    client.connect(args.host, args.port)
    if args.password:
        client.password(args.password)
    if args.options == 'current':
        enqueue_current(client)
    elif args.options == 'search':
        enqueue_date(client, args.search_date)
    elif args.options == 'random':
        # Keep a persistent cache of recently queued albums so repeated
        # invocations don't pick the same albums again.
        cache_file = get_cache_file()
        cache = load_cache(cache_file)
        albums: List[str] = client.list("album")
        percent = get_cache_size(len(albums))
        for n in range(args.number_off_albums):
            album_name = queue_random_album(client, cache)
            if not album_name:
                break
            cache.append(album_name)
        # Trim the cache before saving so it never grows past its bound.
        cache = enforce_cache_size(percent, cache)
        save_cache(cache_file, cache)
    client.close()
    client.disconnect()
def cli():
    """Command-line entry point: build the parser and dispatch to main()."""
    parser = ArgumentParser(description="Control mpd deamon with custom commands")
    subcommands = parser.add_subparsers(dest="options")

    random_cmd = subcommands.add_parser("random", help="Enqueue random albums")
    random_cmd.add_argument(
        "number_off_albums",
        type=int,
        nargs="?",
        default=1,
        help="number of albums to queue",
    )

    subcommands.add_parser("current", help="Enqueue album based on song currently played")

    search_cmd = subcommands.add_parser("search", help="Search albums by year relesed")
    search_cmd.add_argument(
        "search_date",
        type=int,
        nargs="?",
        default=datetime.now().year,
        help="enque all albums from spesified year",
    )

    parser.add_argument(
        "-p",
        "--port",
        dest="port",
        default=PORT,
        metavar="PORT",
        help="specify mpd's port (defaults to {})".format(PORT),
    )
    parser.add_argument(
        "-u",
        "--host",
        dest="host",
        default=HOST,
        metavar="HOST",
        help="specify mpd's host (defaults to {})".format(HOST),
    )
    parser.add_argument(
        "--password",
        dest="password",
        default=PASSWORD,
        metavar="PASSWORD",
        help="specify mpd's password",
    )

    args = parser.parse_args()
    if args.options:
        main(args)
    else:
        parser.print_help()
|
Emgalai/mpd_queue_random_album | tests/test_cache.py | <reponame>Emgalai/mpd_queue_random_album
from mpdrandom.mpdrandom import get_cache_size, enforce_cache_size
def test_larger():
    # 10% of 1010 is 101, which must be clamped to the hard cap of 100.
    assert 100 == get_cache_size(1010, 10)
def test_smaler():
    # Normal case plus small-library edges: 0/1 albums -> no cache;
    # a rounded-down 0 for a non-trivial library is bumped up to 1.
    assert 10 == get_cache_size(100, 10)
    assert 0 == get_cache_size(1, 10)
    assert 0 == get_cache_size(0, 10)
    assert 1 == get_cache_size(2, 10)
def test_size():
    # enforce_cache_size keeps at most n entries, dropping from the front.
    cache = list(range(10))
    assert [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] == enforce_cache_size(10, cache)
    assert [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] == enforce_cache_size(11, cache)
    assert [1, 2, 3, 4, 5, 6, 7, 8, 9] == enforce_cache_size(9, cache)
    assert [] == enforce_cache_size(0, cache)
|
Emgalai/mpd_queue_random_album | setup.py | #!/usr/bin/env python3
from setuptools import setup
# Packaging metadata for the mpdrandom distribution. Installing it exposes
# the `mpdcontrol` console script (see entry_points below).
setup(
    name="mpdrandom",
    packages=["mpdrandom"],
    version="1.0.0",
    description="mpd albums randomizing script",
    author="IbeeX",
    author_email="",
    url="",
    install_requires=["python-mpd2"],
    keywords=["mpd", "album", "random", "shuffle", "music"],
    license="License :: OSI Approved :: GNU General Public License (GPL)",
    classifiers=["Programming Language :: Python", "Development Status :: 4 - Beta"],
    python_requires=">=3.6",
    long_description="""\
mpdrandom
---------
A script that adds the missing randomness to mpds albums
Features
* Queue albums randomly from the library
""",
    entry_points="""
[console_scripts]
mpdcontrol=mpdrandom.mpdrandom:cli
""",
)
|
PACerv/gmm-torch | test.py | <reponame>PACerv/gmm-torch<filename>test.py
import numpy as np
import sklearn.mixture
import torch
from gmm import GaussianMixture
import unittest
class CpuCheck(unittest.TestCase):
    """
    Basic tests for CPU.
    """
    def testPredictClasses(self):
        """
        Assert that torch.FloatTensor is handled correctly.
        """
        x = torch.randn(400, 2)
        n_components = np.random.randint(1, 100)
        model = GaussianMixture(n_components, x.size(1))
        model.fit(x)
        y = model.predict(x)
        # check that dimensionality of class memberships is (n)
        self.assertEqual(torch.Tensor(x.size(0)).size(), y.size())
    def testPredictProbabilities(self):
        """
        Assert that torch.FloatTensor is handled correctly when returning class probabilities.
        """
        x = torch.randn(400, 2)
        n_components = np.random.randint(1, 100)
        model = GaussianMixture(n_components, x.size(1))
        model.fit(x)
        # check that y_p has dimensions (n, k)
        y_p = model.predict(x, probs=True)
        self.assertEqual(torch.Tensor(x.size(0), n_components).size(), y_p.size())
    def testEmMatchesDiagSkLearn(self):
        """
        Assert that log-probabilities (E-step) and parameter updates (M-step) approximately match those of sklearn.
        """
        d = 20
        n_components = np.random.randint(1, 100)
        # (n, k, d)
        x = torch.randn(400, 1, d)
        # (n, d)
        x_np = np.squeeze(x.data.numpy())
        var_init = torch.ones(1, n_components, d) - .4
        model = GaussianMixture(n_components, d, var_init=var_init, covariance_type="diag")
        model_sk = sklearn.mixture.GaussianMixture(n_components,
            covariance_type="diag",
            init_params="random",
            means_init=np.squeeze(model.mu.data.numpy()),
            precisions_init=np.squeeze(1. / np.sqrt(var_init.data.numpy())))
        model_sk._initialize_parameters(x_np, np.random.RandomState())
        log_prob_sk = model_sk._estimate_log_prob(x_np)
        log_prob = model._estimate_log_prob(x)
        # Test whether log-probabilities are approximately equal
        np.testing.assert_almost_equal(np.squeeze(log_prob.data.numpy()),
                                       log_prob_sk,
                                       decimal=2,
                                       verbose=True)
        _, log_resp_sk = model_sk._e_step(x_np)
        _, log_resp = model._e_step(x)
        # Test whether E-steps are approximately equal
        np.testing.assert_almost_equal(np.squeeze(log_resp.data.numpy()),
                                       log_resp_sk,
                                       decimal=0,
                                       verbose=True)
        # BUGFIX: the M-step consumes log-responsibilities, not raw
        # log-probabilities (mirrors testEmMatchesFullSkLearn below).
        model_sk._m_step(x_np, log_resp_sk)
        pi_sk = model_sk.weights_
        mu_sk = model_sk.means_
        # BUGFIX: compare variances against covariances_, not means_ (the
        # previous copy-paste made the variance check vacuous).
        var_sk = model_sk.covariances_
        pi, mu, var = model._m_step(x, log_resp)
        # Test whether pi ..
        np.testing.assert_almost_equal(np.squeeze(pi.data.numpy()),
                                       pi_sk,
                                       decimal=1,
                                       verbose=True)
        # .. mu ..
        np.testing.assert_almost_equal(np.squeeze(mu.data.numpy()),
                                       mu_sk,
                                       decimal=1,
                                       verbose=True)
        # .. and var are approximately equal
        np.testing.assert_almost_equal(np.squeeze(var.data.numpy()),
                                       var_sk,
                                       decimal=1,
                                       verbose=True)
    def testEmMatchesFullSkLearn(self):
        """
        Assert that log-probabilities (E-step) and parameter updates (M-step) approximately match those of sklearn.
        """
        d = 20
        n_components = np.random.randint(1, 100)
        # (n, k, d)
        x = torch.randn(400, 1, d)
        # (n, d)
        x_np = np.squeeze(x.data.numpy())
        var_init = torch.eye(d,dtype=torch.float64).reshape(1, 1, d, d).repeat(1,n_components,1, 1)
        model = GaussianMixture(n_components, d, init_params="random", var_init=var_init, covariance_type="full")
        model_sk = sklearn.mixture.GaussianMixture(n_components,
            covariance_type="full",
            init_params="random",
            means_init=np.squeeze(model.mu.data.numpy()),
            precisions_init=np.squeeze(np.linalg.inv(var_init)))
        model_sk._initialize_parameters(x_np, np.random.RandomState())
        log_prob_sk = model_sk._estimate_log_prob(x_np)
        log_prob = model._estimate_log_prob(x)
        # Test whether log-probabilities are approximately equal
        np.testing.assert_almost_equal(np.squeeze(log_prob.data.numpy()),
                                       log_prob_sk,
                                       decimal=2,
                                       verbose=True)
        _, log_resp_sk = model_sk._e_step(x_np)
        _, log_resp = model._e_step(x)
        # Test whether E-steps are approximately equal
        np.testing.assert_almost_equal(np.squeeze(log_resp.data.numpy()),
                                       log_resp_sk,
                                       decimal=0,
                                       verbose=True)
        model_sk._m_step(x_np, log_resp_sk)
        pi_sk = model_sk.weights_
        mu_sk = model_sk.means_
        var_sk = model_sk.covariances_
        pi, mu, var = model._m_step(x, log_resp)
        # Test whether pi ..
        np.testing.assert_almost_equal(np.squeeze(pi.data.numpy()),
                                       pi_sk,
                                       decimal=1,
                                       verbose=True)
        # .. mu ..
        np.testing.assert_almost_equal(np.squeeze(mu.data.numpy()),
                                       mu_sk,
                                       decimal=1,
                                       verbose=True)
        # .. and var are approximately equal
        np.testing.assert_almost_equal(np.squeeze(var.data.numpy()),
                                       var_sk,
                                       decimal=1,
                                       verbose=True)
class GpuCheck(unittest.TestCase):
    """
    Basic tests for GPU.

    NOTE(review): these tests call .cuda() unconditionally and therefore
    fail on machines without a CUDA device — confirm whether a skip guard
    (torch.cuda.is_available()) is wanted.
    """
    def testPredictClasses(self):
        """
        Assert that torch.cuda.FloatTensor is handled correctly.
        """
        x = torch.randn(400, 2).cuda()
        n_components = np.random.randint(1, 100)
        model = GaussianMixture(n_components, x.size(1), covariance_type="diag").cuda()
        model.fit(x)
        y = model.predict(x)
        # check that dimensionality of class memberships is (n)
        self.assertEqual(torch.Tensor(x.size(0)).size(), y.size())
    def testPredictProbabilities(self):
        """
        Assert that torch.cuda.FloatTensor is handled correctly when returning class probabilities.
        """
        x = torch.randn(400, 2).cuda()
        n_components = np.random.randint(1, 100)
        model = GaussianMixture(n_components, x.size(1), covariance_type="diag").cuda()
        model.fit(x)
        # check that y_p has dimensions (n, k)
        y_p = model.predict(x, probs=True)
        self.assertEqual(torch.Tensor(x.size(0), n_components).size(), y_p.size())
if __name__ == "__main__":
unittest.main()
|
PACerv/gmm-torch | utils.py | import torch
def calculate_matmul_n_times(n_components, mat_a, mat_b):
    """
    Multiply mat_a by mat_b one mixture component at a time.
    Bypasses torch.matmul's broadcasting to reduce the memory footprint.
    args:
        mat_a: torch.Tensor (n, k, 1, d)
        mat_b: torch.Tensor (1, k, d, d)
    returns:
        torch.Tensor (n, k, 1, d), double precision, on mat_a's device
    """
    product = torch.zeros(mat_a.shape).double().to(mat_a.device)
    for component in range(n_components):
        lhs = mat_a[:, component, :, :].squeeze(-2)
        rhs = mat_b[0, component, :, :].squeeze()
        product[:, component, :, :] = lhs.mm(rhs).unsqueeze(1)
    return product
def calculate_matmul(mat_a, mat_b):
    """
    Row-by-column product of mat_a and mat_b, computed as an elementwise
    multiply plus a sum to avoid torch.matmul's memory footprint.
    args:
        mat_a: torch.Tensor (n, k, 1, d)
        mat_b: torch.Tensor (n, k, d, 1)
    returns:
        torch.Tensor (n, k, 1)
    """
    assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1
    products = mat_a.squeeze(-2) * mat_b.squeeze(-1)
    return torch.sum(products, dim=2, keepdim=True)
|
The-SocialLion/Sentence-Prediction-Using-BERT | spub.py | # -*- coding: utf-8 -*-
"""SPUB.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cqgT4kKo8l52Rs_hozQQBVFZK8atvd8g
#**Sentence Prediction using Bert**
"""
pip install transformers
from transformers import BertTokenizer, BertForNextSentencePrediction
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
text = ("After <NAME> won the November 1860 presidential election on an "
"anti-slavery platform, an initial seven slave states declared their "
"secession from the country to form the Confederacy.")
text2 = ("War broke out in April 1861 when secessionist forces attacked Fort "
"Sumter in South Carolina, just over a month after Lincoln's "
"inauguration.")
# the text2 is a continuation of text (correlating sentences)
text1=("Rome is the capital city and a special comune of Italy (named Comune di Roma Capitale), as well as the capital of the Lazio region "
"The city has been a major human settlement for almost three millennia. With 2,860,009 residents in 1,285 km2 (496.1 sq mi) it is also the country's most populated comune "
"It is the third most populous city in the European Union by population within city limits "
"It is the centre of the Metropolitan City of Rome, which has a population of 4,355,725 residents, thus making it the most populous metropolitan city in Italy "
"Its metropolitan area is the third-most populous within Italy.[3] Rome is located in the central-western portion of the Italian Peninsula, within Lazio (Latium), along the shores of the Tiber "
"Vatican City (the smallest country in the world)[4] is an independent country inside the city boundaries of Rome, the only existing example of a country within a city for this reason Rome has sometimes been defined as the capital of two states
"Rome's Neighbouring country/city Carthage was one of its worst rival as due to the competetion of trade and goods and services ")
text3=("Carthage was the capital city of the ancient Carthaginian civilization, on the eastern side of the Lake of Tunis in what is now Tunisia "
"Carthage was the most important trading hub of the Ancient Mediterranean and one of the most affluent cities of the classical world "
"The city developed from a Phoenician colony into the capital of a Punic empire which dominated large parts of the Southwest Mediterranean during the first millennium BC "
"The legendary Queen Alyssa or Dido is regarded as the founder of the city, though her historicity has been questioned "
"According to accounts by Timaeus of Tauromenium, she purchased from a local tribe the amount of land that could be covered by an oxhide ")
# the text3 is a continuation of text1 (correlating sentences(twisted))
"""**The three fundamental steps to complete the same are**
**Tokenization,**
**Create classification label and**
**Calculate loss**
#**Tokenization**
"""
help(tokenizer)
inputs = tokenizer(text, text2, return_tensors='pt')
inputs.keys()
inputs1 = tokenizer(text1, text3, return_tensors='pt')
inputs1.keys()
inputs
inputs1
"""#**Class Label Creation**"""
labels = torch.LongTensor([0])
labels
"""#**Loss calculation**"""
outputs = model(**inputs, labels=labels)
outputs.keys()
outputs1 = model(**inputs1, labels=labels)
outputs1.keys()
outputs.loss
outputs1.loss
outputs.loss.item()
outputs1.loss.item()
"""#**Prediction**"""
outputs = model(**inputs)
outputs.keys()
outputs1 = model(**inputs)
outputs1.keys()
torch.argmax(outputs.logits)
torch.argmax(outputs1.logits)
"""**so Hence as the result of the logit tensor is 0 hence the model believes that text2 comes after text and text3 comes after text1 which is correct**""" |
Erechtheus/mutation-ner | toIOB.py | # Script to transform corpus from .ann to .iob format
import pandas as pd
import urllib.request  # handles url
import spacy  # spacing and tokenization #conda install -c conda-forge spacy #python -m spacy download en_core_web_sm

# Transform the SETH mutation corpus from .ann (offset annotations) into
# .iob (token-per-line B/I/O tags). Downloads the corpus and one annotation
# file per abstract from GitHub, tokenizes with spaCy, and aligns character
# offsets to tokens.
nlp = spacy.load('en_core_web_sm')
url_corpus = "https://raw.githubusercontent.com/Erechtheus/mutationCorpora/master/corpora/original/SETH/corpus.txt"
corpus = urllib.request.urlopen(url_corpus)
corpus_tokenized = []  # store all tokenized abstracts (list of a list (of abstracts) of a list (of sentences of words))
# [ [ ["w1", "w2"], ["w1", "w2"] ], [ ["w1", "w2"], ["w1", "w2"] ], [ ["w1", "w2"], ["w1", "w2"] ] ]
corpus_iob = []
for line in corpus:  # line = one abstract
    decoded_line = line.decode("utf-8")  # ID:0-7 SPACE firstWord
    pubMed_id = decoded_line.split()[0]  # save pubMed iD
    print(pubMed_id, '\n')
    abstract = decoded_line[len(pubMed_id)+1:]  # remove pubMed ID, so that only abstract text remains
    doc = nlp(abstract)  # feed text to language object nlp -> tokenization
    abstract_tokenized = []  # tokenized abstract divided into sentences
    abstract_iob = []
    abstract_tokenized.append('#' + pubMed_id)
    abstract_iob.append('#' + pubMed_id)
    url_annotation = "https://raw.githubusercontent.com/Erechtheus/mutationCorpora/master/corpora/original/SETH/annotations/" + pubMed_id + ".ann"
    annotation = urllib.request.urlopen(url_annotation)
    annotation_label_list = []
    offset_start_list = []
    offset_end_list = []
    for line_annotation in annotation:
        decoded_line_annotation = line_annotation.decode("utf-8")
        # Only "T" lines carry text-bound annotations (label start end).
        if decoded_line_annotation.split()[0][0] == 'T':
            annotation_label_list.append(decoded_line_annotation.split()[1])
            offset_start_list.append(int(decoded_line_annotation.split()[2]))
            offset_end_list.append(int(decoded_line_annotation.split()[3]))
    if len(annotation_label_list) == 0:
        # Sentinel entry so the offset comparisons below never match.
        annotation_label_list.append(float('inf'))
        offset_start_list.append(float('inf'))
        offset_end_list.append(float('inf'))
    i = 0  # character index in abstract
    annotation_i = 0  # current line in annotation file
    for sent in doc.sents:  # access sentences
        sentence_tokenized = []  # ["w1", "w2"]
        sentence_iob = []
        intermediate = False
        for token in sent:  # access words/symbols (tokens)
            if (i >= offset_end_list[annotation_i]) & (annotation_i < len(offset_end_list)-1):
                annotation_i += 1
                intermediate = False
            if (i < offset_start_list[annotation_i]) | (i >= offset_end_list[annotation_i]):  # non-entity case
                intermediate = False
                sentence_tokenized.append(token)
                sentence_iob.append("O")
                i += len(token.text_with_ws)  # if there is a whitespace after token, count the whitespace as a character too
            else:  # entity case
                if intermediate:  # intermediate case
                    sentence_tokenized.append(token)
                    sentence_iob.append("I-" + annotation_label_list[annotation_i])
                    i += len(token.text_with_ws)
                else:  # Begin case
                    sentence_tokenized.append(token)
                    sentence_iob.append("B-" + annotation_label_list[annotation_i])
                    i += len(token.text_with_ws)
                    if i < offset_end_list[annotation_i]:
                        intermediate = True
        sentence_tokenized.append(" ")  # add space after each sentence
        sentence_iob.append(" ")
        abstract_tokenized.append(sentence_tokenized)
        abstract_iob.append(sentence_iob)
    corpus_tokenized.append(abstract_tokenized)
    corpus_iob.append(abstract_iob)

# FIX: renamed from `dict`, which shadowed the builtin.
column_data = {'Word': corpus_tokenized, 'Tag': corpus_iob}
df = pd.DataFrame(column_data)
df_ = df.apply(pd.Series.explode).reset_index()
df__ = df_.apply(pd.Series.explode).reset_index()
output = df__[['Word', 'Tag']]
output.to_csv("corpus_IOB.csv", index=False)
Erechtheus/mutation-ner | NER_evaluation.py | <reponame>Erechtheus/mutation-ner<filename>NER_evaluation.py
# -*- coding: utf-8 -*-
# Transformer based Mutation Recognition of SETH Corpus (NLP-NER)
# Final Evaluation Run of final hyperparameters
# inspired by: https://github.com/huggingface/notebooks/blob/master/transformers_doc/custom_datasets.ipynb
# conda install -c conda-forge seqeval
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report # conda install -c conda-forge scikit-learn
from sklearn.model_selection import train_test_split, KFold
import torch # conda install pytorch=1.5
from datasets import load_metric # conda install -c huggingface -c conda-forge datasets
from transformers import AutoTokenizer, AutoModelForTokenClassification, Trainer, TrainingArguments
import wandb #conda install -c conda-forge wandb
wandb.login()
wandb.init() #wandb.init(project='NER', entity='seyamy')
# Tuned Parameters
MODEL_NAME = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
EPOCHS = 9
BATCH_SIZE = 16
LEARNING_RATE = 0.000115
SEED = 45
K = 5
UNIQUE_TAGS = ['O', 'B-Gene', 'I-Gene', 'B-SNP', 'I-SNP', 'B-RS']
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f"\n Device: {DEVICE} \n")
class Dataset(torch.utils.data.Dataset):
    """Token-classification dataset pairing tokenizer encodings with labels."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # One sample: every encoding field at idx, plus its label tensor.
        sample = {name: torch.tensor(values[idx]) for name, values in self.encodings.items()}
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
def dataloader(df):
    """Split the IOB dataframe into per-sentence token and tag lists.

    Rows whose Tag contains '#' (abstract IDs) and rows whose Word contains
    a newline are dropped; ' ' rows act as sentence separators.
    Returns (token_docs, tag_docs), each a list of lists of strings.
    """
    cleaned = df[~df["Tag"].str.contains('#')]                      # remove ID rows
    cleaned = cleaned[~cleaned["Word"].str.contains('\n', na=False)]  # remove '\n' rows
    cleaned = cleaned.dropna()

    def split_sentences(values):
        # Break the flat list at every ' ' sentinel, drop the sentinel
        # itself, and discard any empty sentences.
        size = len(values)
        breaks = [pos + 1 for pos, item in enumerate(values) if item == ' ']
        ends = breaks + ([size] if breaks[-1] != size else [])
        chunks = [values[start:stop] for start, stop in zip([0] + breaks, ends)]
        for chunk in chunks:
            chunk.remove(' ')
        return [chunk for chunk in chunks if chunk]

    return split_sentences(cleaned['Word'].tolist()), split_sentences(cleaned['Tag'].tolist())
def encode_tags(tag2id, tags, encodings):
    """Align per-word tag ids with subword tokens.

    Tokens that are not the first subword of a word (and special tokens)
    receive the ignore index -100 so the loss skips them.
    """
    encoded_labels = []
    for doc_tags, doc_offset in zip(tags, encodings.offset_mapping):
        doc_ids = [tag2id[tag] for tag in doc_tags]
        offsets = np.array(doc_offset)
        aligned = np.ones(len(doc_offset), dtype=int) * -100
        # A token whose span starts at char 0 of its word and is non-empty
        # is the word's first subword -> it carries the word's label.
        first_subword = (offsets[:, 0] == 0) & (offsets[:, 1] != 0)
        aligned[first_subword] = doc_ids
        encoded_labels.append(aligned.tolist())
    return encoded_labels
def create_dataset(texts, tags):
    """Tokenize pre-split *texts*, align *tags* to subword tokens and wrap
    everything in a Dataset ready for the HuggingFace Trainer.

    :param texts: list of sentences, each a list of word strings.
    :param tags: matching list of IOB tag strings per word.
    :returns: a Dataset of encodings + aligned label ids.
    """
    # Simple label -> id mapping derived from the fixed global tag set.
    # (The reverse id -> tag mapping was computed here before but never used.)
    tag2id = {tag: id for id, tag in enumerate(UNIQUE_TAGS)}
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    # Offsets are needed by encode_tags to identify first-subword positions.
    encodings = tokenizer(texts, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)  # padding="max_length"
    labels = encode_tags(tag2id, tags, encodings)
    encodings.pop("offset_mapping")  # we don't want to pass this to the model
    dataset = Dataset(encodings, labels)
    return dataset
def compute_metrics(pred):
    """Compute token-classification metrics for a Trainer EvalPrediction.

    Prints a per-label sklearn report and entity-level seqeval results,
    logs the overall F1 to wandb, and returns the seqeval results dict.
    """
    metric = load_metric("seqeval")
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # Remove ignored index (special tokens)
    true_predictions = [[UNIQUE_TAGS[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(preds, labels)]
    true_labels = [[UNIQUE_TAGS[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(preds, labels)]
    # Flatten per-sentence tag lists for the token-level sklearn report.
    report = classification_report(
        y_true=[val for sublist in true_labels for val in sublist],
        y_pred=[val for sublist in true_predictions for val in sublist],
        labels=UNIQUE_TAGS)#, output_dict=True)
    print(f"\n Classification report from sklearn (calculated results per unique label): \n\n {report} \n\n")
    results = metric.compute(predictions=true_predictions, references=true_labels, scheme="IOB1", suffix=False,)
    print(f"\n seqeval results (combines B/I prefixes): \n\n {results} \n\n")
    # Log metrics over time to visualize performance in wandb
    wandb.log({"eval_overall_f1": results['overall_f1']})
    return results
def training(model, train_dataset, test_dataset):
    """Fine-tune *model* on *train_dataset*, evaluating on *test_dataset*.

    Uses the module-level EPOCHS / BATCH_SIZE / LEARNING_RATE / SEED
    hyperparameters. Returns (trainer, evaluation_results_dict).
    """
    training_args = TrainingArguments(
        output_dir = './results',          # output directory
        overwrite_output_dir = True,
        num_train_epochs = EPOCHS,              # total number of training epochs
        per_device_train_batch_size = BATCH_SIZE,  # batch size per device during training
        per_device_eval_batch_size = BATCH_SIZE,   # batch size for evaluation
        learning_rate = LEARNING_RATE,
        warmup_steps = 500,                # number of warmup steps for learning rate scheduler
        weight_decay = 0.01,               # strength of weight decay
        #logging_dir = './logs',            # directory for storing logs
        #logging_steps = 10,
        evaluation_strategy = "epoch", # // "steps"
        save_strategy = "epoch",
        #eval_steps = 10000, # Evaluation and Save happens every 10 steps
        save_total_limit = 1, # Only last (n) models are saved. Older ones are deleted.
        load_best_model_at_end = True, # load the best model when finished training (default metric is loss) # the model loaded at the end of training is the one that had the best performance on validation set. So when you save that model, you have the best model on this validation set.
        metric_for_best_model = "eval_overall_f1", # name of a metric returned by the evaluation
        greater_is_better = True, # higher eval_overall_f1 score is better
        seed = SEED,
        report_to = "wandb",  # enable logging to W&B
        run_name="run-1",  # name of the W&B run (optional)
    )
    trainer = Trainer(
        model=model,                         # the instantiated Transformers model to be trained
        args=training_args,                  # training arguments, defined above
        train_dataset=train_dataset,         # training dataset
        eval_dataset=test_dataset,             # evaluation dataset
        compute_metrics=compute_metrics,
        #callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    # fine tune model
    trainer.train()
    # Evaluate once more so the best (reloaded) model's metrics are returned.
    evaluation = trainer.evaluate() # allows to evaluate again on the evaluation dataset or on another dataset
    return trainer, evaluation
def final_evaluation(best_model_path, final_test_dataset):
    """Load the saved best checkpoint and evaluate it on the held-out test set."""
    # load best saved model
    best_model = AutoModelForTokenClassification.from_pretrained(best_model_path, num_labels=len(UNIQUE_TAGS)).to(DEVICE)
    # Define test trainer
    final_test_trainer = Trainer(model = best_model, compute_metrics = compute_metrics)
    final_test_evaluation = final_test_trainer.evaluate(eval_dataset = final_test_dataset)
    return final_test_evaluation
def main():
    """Run k-fold cross-validated fine-tuning on the SETH IOB corpus.

    Splits abstracts (not rows) into train/held-out sets, trains K folds,
    keeps the best checkpoint by overall F1, and evaluates it on the
    held-out test set.
    """
    print("\n -- Load & Preprocess Data -- \n")
    # load data
    data_path = "corpus_IOB.csv"
    data_raw = pd.read_csv(data_path, encoding="latin1" )
    data_raw = data_raw.dropna() # drop NAs
    # split raw dataframe into multiple dataframes each representing an abstract
    id_idx_list = [idx for idx, val in enumerate(data_raw['Word'].tolist()) if '#' in val] # indices where to split dataframe (based on #ID rows)
    idx_mod = id_idx_list + [len(data_raw)] #[max(id_idx_list)+1]
    list_of_dfs = [data_raw.iloc[idx_mod[n]:idx_mod[n+1]] for n in range(len(idx_mod)-1)]
    # extract final test set
    list_dfs_train , list_dfs_test = train_test_split(list_of_dfs, test_size=0.1, random_state=42)
    final_test = pd.concat(list_dfs_test)
    final_test_texts, final_test_tags = dataloader(final_test)
    # tokenization & create dataset
    final_test_dataset = create_dataset(final_test_texts, final_test_tags)
    # k-fold Cross Validation Training Loop
    best_f1_score = 0
    average_overall_f1_score = 0
    average_SNP_f1_score = 0
    # prepare cross validation
    kfold = KFold(n_splits=K, shuffle=True, random_state=42)
    # enumerate splits
    for i, (train, test) in enumerate(kfold.split(list_dfs_train)):
        print(f"Fold {i}:")
        #print(f"train indices: {train}, test indices: {test}")
        train_dfs = []
        for f in train: train_dfs.append(list_dfs_train[f])
        train_fold = pd.concat(train_dfs)
        test_dfs = []
        for f in test: test_dfs.append(list_dfs_train[f])
        test_fold = pd.concat(test_dfs)
        train_texts, train_tags = dataloader(train_fold)
        test_texts, test_tags = dataloader(test_fold)
        # global UNIQUE_TAGS # prevents creation of a local variable called myglobal
        # UNIQUE_TAGS = np.unique(np.array([tag for doc in train_tags for tag in doc])) # look only at training tags because cant predict (on test set) what was seen
        # print(UNIQUE_TAGS, '\n')
        # tokenization & create datasets
        train_dataset = create_dataset(train_texts, train_tags)
        test_dataset = create_dataset(test_texts, test_tags)
        # load token classification model and specify the number of labels
        # (fresh model per fold so folds don't leak into each other)
        print("\n -- Load Model -- \n")
        model = AutoModelForTokenClassification.from_pretrained(MODEL_NAME, num_labels=len(UNIQUE_TAGS)).to(DEVICE)
        # training & evaluation
        print("\n -- Start Training -- \n")
        trainer, evaluation = training(model, train_dataset, test_dataset)
        #training_native(model, train_dataset, test_dataset)
        print("\n -- Evaluation Results -- \n")
        print("current overall f1 score: ", evaluation['eval_overall_f1'], '\n')
        print("current SNP f1 score: ", evaluation['eval_SNP']['f1'], '\n')
        average_overall_f1_score += evaluation['eval_overall_f1']
        average_SNP_f1_score += evaluation['eval_SNP']['f1']
        # keep only the best fold's model on disk
        if evaluation['eval_overall_f1'] > best_f1_score:
            print(f"-> best SNP f1 score so far (previous best f1 score: {best_f1_score} ) -> save model \n")
            best_f1_score = evaluation['eval_overall_f1']
            model.save_pretrained("saved_model")
        else:
            print(f"SNP f1 score is worse, best SNP f1 score so far is: {best_f1_score} \n")
        print(f"\n - Fold {i} done - \n")
    print("\n -- Complete training done -- \n")
    average_overall_f1_score = average_overall_f1_score/K
    average_SNP_f1_score = average_SNP_f1_score/K
    print(f"\n Average overall f1 score (k = {K}): {average_overall_f1_score}")
    print(f"\n Average SNP f1 score (k = {K}): {average_SNP_f1_score}")
    print("\n -- Evaluate best model on final test set -- \n")
    final_test_evaluation = final_evaluation("saved_model", final_test_dataset)
    print("\n -- Done Evaluation --")
    return 0
if __name__ == "__main__":
    main()
Erechtheus/mutation-ner | NER_prediction.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Transformer based Mutation Recognition of SETH Corpus (NLP-NER)
# Pipeline for prediciton of IOB-Tags in a given text using the best tuned model
# inspired by: https://huggingface.co/dslim/bert-base-NER
import torch # conda install pytorch=1.5
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
MODEL_NAME = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
UNIQUE_TAGS = ['O', 'B-Gene', 'I-Gene', 'B-SNP', 'I-SNP', 'B-RS']
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f"\n Device: {DEVICE} \n")
def make_prediction(best_model_path, text):
    """Run the saved NER model on *text* via a transformers pipeline.

    Writes the text and per-token predictions to output.txt and returns
    the pipeline's prediction list.
    """
    best_model = AutoModelForTokenClassification.from_pretrained(best_model_path, num_labels=len(UNIQUE_TAGS)).to(DEVICE)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    nlp = pipeline('ner', model=best_model, tokenizer=tokenizer)
    prediction = nlp(text)
    # "w" truncates the file first, then the predictions are appended.
    print(text, '\n', file=open("output.txt", "w"))
    print(*prediction, sep = '\n', file=open("output.txt", "a"))
    return prediction
def main():
    """Demo driver: predict IOB tags for one hard-coded abstract with the tuned model."""
    print("\n -- Predict IOB-tags of a given text using best model -- \n")
    model_path = "best_model" # path to tuned model that should be used for label prediciton
    # text to be labeled (abstract describing hGSTP1-1 G146A / G146V mutants)
    text = "In human glutathione transferase P1-1 (hGSTP1-1) position 146 is occupied by a glycine residue, which is located in a bend of a long loop that together with the alpha6-helix forms a substructure (GST motif II) maintained in all soluble GSTs. In the present study G146A and G146V mutants were generated by site-directed mutagenesis in order to investigate the function played by this conserved residue in folding and stability of hGSTP1-1."
    # --> Mutations: G146A, G146V
    print(f"Text to predict: \n{text}\n")
    prediction = make_prediction(model_path, text)
    print("\n Prediction of IOB-Tags (0 = O, 1 = B-Gene, 2 = I-Gene, 3 = B-SNP, 4 = I-SNP, 5 = B-RS) \n", *prediction, sep = '\n')
    print("\n -- Done Prediction --")
    return 0
if __name__ == "__main__":
main() |
Erechtheus/mutation-ner | data-analysis.py | import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import matplotlib.pyplot as plt
import seaborn as sns
def main():
    """Load the IOB corpus, split off a final test set, and plot tag distributions."""
    print("\n -- Load & Preprocess Data -- \n")
    # load data
    data_path = "corpus_IOB.csv"
    data_raw = pd.read_csv(data_path, encoding="latin1" )
    data_raw = data_raw.dropna() # drop NAs
    # NOTE(review): the pattern '\n| ' (regex alternation of a literal newline
    # or a space, default regex=True) removes the blank rows emitted after each
    # abstract — confirm that no legitimate Word values contain a space.
    data_raw = data_raw[~data_raw["Word"].str.contains('\n| ', na=False)] # remove '\n' rows after each abstract
    # split raw dataframe into multiple dataframes each representing an abstract
    id_idx_list = [idx for idx, val in enumerate(data_raw['Word'].tolist()) if '#' in val] # indices where to split dataframe (based on #ID rows)
    # append the end-of-frame index so the last abstract is included
    idx_mod = id_idx_list + [len(data_raw)] #[max(id_idx_list)+1]
    list_of_dfs = [data_raw.iloc[idx_mod[n]:idx_mod[n+1]] for n in range(len(idx_mod)-1)]
    # extract final test set (10% of abstracts, fixed seed for reproducibility)
    list_dfs_train , list_dfs_test = train_test_split(list_of_dfs, test_size=0.1, random_state=42)
    full_train = pd.concat(list_dfs_train)
    full_train = full_train[~full_train["Tag"].str.contains('#')] # drop ID rows
    final_test = pd.concat(list_dfs_test)
    final_test = final_test[~final_test["Tag"].str.contains('#')] # drop ID rows
    #print(full_train[full_train["Tag"].str.contains('O|B-Gene|B-SNP|I-Gene|I-SNP|B-RS')==False])
    #print(full_train, final_test, sep='\n')
    # side-by-side tag-count plots for the training and test portions,
    # with the raw counts annotated above each bar
    fig, ax = plt.subplots(1,2)
    ax1 = sns.countplot(x=full_train['Tag'], ax=ax[0])
    ax2 = sns.countplot(x=final_test['Tag'], ax=ax[1])
    ax1.set_title('Training', fontsize=20)
    ax2.set_title('Test', fontsize=20)
    for p in ax1.patches:
        ax1.annotate(f'\n{p.get_height()}', (p.get_x()+0.2, p.get_height()+5000), ha='center', va='top', color='black', size=12)
    for p in ax2.patches:
        ax2.annotate(f'\n{p.get_height()}', (p.get_x()+0.2, p.get_height()+700), ha='center', va='top', color='black', size=12)
    #plt.savefig('output.png')
    plt.show()
    print("\n -- Done -- \n")
    return 0
if __name__ == "__main__":
main() |
dowellde/workshop2 | workshop2/__init__.py | import os
import argparse
import configparser
import sys
def run():
    """CLI entry point: echo ``--display``, greet, and optionally dump a config file.

    With no arguments, prints the help text and exits with status 1.
    """
    parser = argparse.ArgumentParser(description='This is a test package')
    parser.add_argument('--display',help='Prints a statement',default='No display message')
    parser.add_argument('--config',help='Reads a config.ini file.',default=False)
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    arg = parser.parse_args().display
    # Fixed: the original used Python-2 print *statements* (`print arg`), a
    # SyntaxError on Python 3 — which this module requires anyway, since it
    # imports the Python-3-only `configparser` (Py2 spelled it ConfigParser).
    print(arg)
    print("Hello World!")
    os.system("echo hello world in bash!")
    configfile = parser.parse_args().config
    if configfile != False:
        config = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
        config.read(configfile)
        for key in config:
            for item in config[key]:
                print(key, item, config[key][item])
|
dowellde/workshop2 | workshop2/__main__.py | <filename>workshop2/__main__.py
# Allows `python -m workshop2`: delegate straight to the package entry point.
# NOTE(review): `import __init__` only resolves when the package directory
# itself is on sys.path (it bypasses the relative form `from . import ...`) —
# confirm this matches how the package is invoked.
import __init__
__init__.run()
|
ThallyssonKlein/DjangoBasicAuth | core/views.py | <filename>core/views.py
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
class Home(APIView):
    """Authenticated health-check endpoint: GET returns a plain 'Ok' payload."""

    permission_classes = (IsAuthenticated, )

    def get(self, request, format=None):
        # Any authenticated caller receives the same minimal OK response.
        response = Response('Ok', status=200)
        return response
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFourMinimaxAI.py | import copy
import ConnectFourEngine
import ConnectFourBoard
import random
def other(token):
    """Return the opposing token, or None for anything unrecognised."""
    opposites = {
        ConnectFourBoard.RED: ConnectFourBoard.BLUE,
        ConnectFourBoard.BLUE: ConnectFourBoard.RED,
    }
    return opposites.get(token)
def state_score(board, red_turn):
    """Static evaluation: red's lead when *red_turn* is True, blue's lead otherwise."""
    red, blue = board.score()
    return red - blue if red_turn else blue - red
def max_play(board, token, ply_remaining, red_turn):
    """Maximizing step of plain minimax; returns (column, value).

    *token* is the maximizer's token for this ply; *red_turn* records whether
    the ROOT player is red and orients every leaf evaluation.
    """
    moves = []
    available_moves = board.not_full_columns()
    random.shuffle(available_moves)  # random tie-breaking between equal moves
    for n in available_moves:
        newboard = copy.deepcopy(board)
        if (newboard.col_height(n) < newboard.height):
            newboard.attempt_insert(n, token)
        if ply_remaining <= 0 or newboard.is_full():
            # Fixed: evaluate leaves from the root player's perspective
            # (red_turn), as AlphaBetaAI does. The original used
            # `token == ConnectFourBoard.RED`, which flips the evaluation sign
            # at minimizing depths and made min_play pick moves GOOD for the
            # root player. The `red_turn` parameter was threaded through but
            # never used — strong evidence this was the intent.
            value = state_score(newboard, red_turn)
        else:
            (min_move, value) = min_play(newboard, other(token), ply_remaining -1, red_turn )
        moves.append((n, value))
    best_move = max(moves, key = lambda x: x[1])
    return best_move
def min_play(board, token, ply_remaining, red_turn):
    """Minimizing step of plain minimax; returns (column, value).

    *token* is the MINIMIZER's (opponent's) token; *red_turn* records whether
    the ROOT player is red and orients every leaf evaluation.
    """
    moves = []
    available_moves = board.not_full_columns()
    random.shuffle(available_moves)  # random tie-breaking between equal moves
    for n in available_moves:
        newboard = copy.deepcopy(board)
        if (newboard.col_height(n) < newboard.height):
            newboard.attempt_insert(n, token)
        if ply_remaining <= 0 or newboard.is_full():
            # Fixed: evaluate from the root player's perspective (red_turn),
            # not `token == RED`. At a min node *token* is the opponent, so the
            # original scored leaves from the opponent's perspective and then
            # minimized — i.e. the opponent chose its own WORST move. Using
            # red_turn matches AlphaBetaAI and makes min() minimize the root
            # player's advantage, as minimax requires.
            value = state_score(newboard, red_turn)
        else:
            (max_move, value) = max_play(newboard, other(token), ply_remaining-1, red_turn ) # the highest possible move you can get for this column
        moves.append((n, value))
    best_move = min(moves, key = lambda x: x[1])
    return best_move
def AIcheck(board, token, red_turn):
    """Engine-facing move chooser: run a 4-ply minimax search and return the column."""
    search_depth = 4
    chosen_column, _ = max_play(board, token, search_depth, red_turn)
    return chosen_column
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | AlphaBetaAI.py | import copy
import ConnectFourEngine
import ConnectFourBoard
import math
import random
# Sentinels for the initial alpha-beta search window (-inf, +inf).
p_inf = float("inf")
n_inf = float("-inf")
def other(token):
    """Return the opponent of *token*, or None if it is neither colour."""
    if token not in (ConnectFourBoard.RED, ConnectFourBoard.BLUE):
        return None
    return ConnectFourBoard.BLUE if token == ConnectFourBoard.RED else ConnectFourBoard.RED
def state_score(board, red_turn):
    """Static evaluation of *board* from the perspective fixed by *red_turn*."""
    score_red, score_blue = board.score()
    advantage = score_red - score_blue
    # Positive means "good for the evaluating player".
    return advantage if red_turn else -advantage
def max_play(board, token, ply_remaining, red_turn, alpha, beta):
    """Maximizing node of alpha-beta search; returns (best_column, bound).

    *token* is the maximizer's token at this ply; *red_turn* fixes the root
    player's perspective for leaf evaluation; [alpha, beta] is the current
    search window. Fail-hard style: the value returned is the clamped alpha
    bound rather than the raw best child value.
    """
    (best_move, value) = (None, n_inf)
    available_moves = board.not_full_columns()
    random.shuffle(available_moves)  # randomised move ordering / tie-breaking
    for n in available_moves:
        newboard = copy.deepcopy(board)
        if (newboard.col_height(n) < newboard.height):
            newboard.attempt_insert(n, token)
        if ply_remaining <= 0 or newboard.is_full():
            # Leaf: static evaluation from the root player's perspective.
            value_child = state_score(newboard, red_turn)
        else:
            # Recurse as the opponent (minimizer) with the narrowed window.
            (min_move, value_child) = min_play(newboard, other(token), ply_remaining-1, red_turn, alpha, beta)
        if value < value_child:
            (best_move, value) = (n, value_child)
        alpha = max(alpha, value_child)
        if alpha >= beta: return (best_move, alpha)  # beta cutoff: prune the remaining columns
    return (best_move, alpha)
def min_play(board, token, ply_remaining, red_turn, alpha, beta):
    """Minimizing node of alpha-beta search; returns (best_column, bound).

    *token* is the minimizer's (opponent's) token at this ply; *red_turn*
    fixes the root player's perspective for leaf evaluation. Fail-hard style:
    the value returned is the clamped beta bound.
    """
    (best_move, value) = (None, p_inf)
    available_moves = board.not_full_columns()
    random.shuffle(available_moves)  # randomised move ordering / tie-breaking
    for n in available_moves:
        newboard = copy.deepcopy(board)
        if (newboard.col_height(n) < newboard.height):
            newboard.attempt_insert(n, token)
        if ply_remaining <= 0 or newboard.is_full():
            # Leaf: static evaluation from the root player's perspective.
            value_child = state_score(newboard, red_turn)
        else:
            (max_move, value_child) = max_play(newboard, other(token), ply_remaining-1, red_turn, alpha, beta) # the highest possible move you can get for this column
        if value > value_child:
            (best_move, value) = (n, value_child)
        beta = min(beta, value_child)
        if alpha >= beta: return (best_move, beta)  # alpha cutoff: prune the remaining columns
    return (best_move, beta)
def AIcheck(board, token, red_turn):
    """Engine-facing move chooser: 4-ply alpha-beta search from an open window."""
    search_depth = 4
    chosen_column, _ = max_play(board, token, search_depth, red_turn, alpha=n_inf, beta=p_inf)
    return chosen_column
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFourGraphics.py | import pygame
# This module takes care of the graphical interface: drawing the shapes and
# printing the text that together make the game window.
# display constants (these are not crucial to the game or pygame)
FONTSIZE = 24        # UI font height, in px
CELL_SIZE = 48       # edge length of one square board cell, in px
OFFSET_CANVAS = 20   # outer margin around the whole drawing
TOP_OFFSET = 24      # strip above the grid where the hover arrow is drawn
BOTTOM_SPACING = 64  # strip below the grid for scores / status text
# colour constants (RGB tuples)
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = ( 0, 0, 255)
YELLOW = (250, 240, 190)
def setup_display(board):
    """Open the pygame window sized to fit *board* and return (display, gamefont)."""
    # Window = grid + margins + the arrow strip on top + the score strip below.
    window_width = 2 * OFFSET_CANVAS + board.width * CELL_SIZE
    window_height = 2 * OFFSET_CANVAS + TOP_OFFSET + BOTTOM_SPACING + board.height * CELL_SIZE
    display = pygame.display.set_mode((window_width, window_height), 0, 32)
    pygame.display.set_caption('Connect Many')
    gamefont = pygame.font.Font(None, FONTSIZE)
    return (display, gamefont)
def draw_arrow(display, column):
    """Draw the downward arrow above *column* marking the hovered drop position."""
    # Three segments within the TOP_OFFSET strip: a vertical shaft plus two
    # diagonals meeting at the bottom point.
    top_point = (OFFSET_CANVAS + CELL_SIZE / 2 + CELL_SIZE * column,
                 OFFSET_CANVAS)
    bottom_point = (OFFSET_CANVAS + CELL_SIZE / 2 + CELL_SIZE * column,
                    OFFSET_CANVAS + TOP_OFFSET * 3 / 4)
    left_point = (OFFSET_CANVAS + 3 * CELL_SIZE / 8 + CELL_SIZE * column,
                  OFFSET_CANVAS + TOP_OFFSET / 2)
    right_point = (OFFSET_CANVAS + 5 * CELL_SIZE / 8 + CELL_SIZE * column,
                   OFFSET_CANVAS + TOP_OFFSET / 2)
    pygame.draw.line(display, BLACK, left_point, bottom_point, 3)
    pygame.draw.line(display, BLACK, right_point, bottom_point, 3)
    pygame.draw.line(display, BLACK, top_point, bottom_point, 3)
def draw_board(game_display,
        board, score_red, score_blue,
        selected_index, game_running, player_turn, red_turn, winner):
    """Redraw one complete frame: background, grid, tokens, scores and status.

    *game_display* is the (surface, font) pair from setup_display; *winner*
    is only rendered once *game_running* is False.
    """
    (display, gamefont) = game_display
    display.fill(YELLOW)
    # draw border
    pygame.draw.rect(display, BLACK,
        (OFFSET_CANVAS,
        OFFSET_CANVAS + TOP_OFFSET,
        board.width * CELL_SIZE,
        board.height * CELL_SIZE
        ),
        2)
    # draw all tokens and circles (row 0 is the bottom row, hence the flip in yc)
    for j in range(board.height):
        for i in range(board.width):
            xc = OFFSET_CANVAS + CELL_SIZE / 2 + i * CELL_SIZE
            yc = OFFSET_CANVAS + TOP_OFFSET + CELL_SIZE / 2 + (board.height - j - 1) * CELL_SIZE
            if board.field[i][j] == 1:
                pygame.draw.circle(display, RED, (int(xc), int(yc)), int(CELL_SIZE * 2 / 5), 0)
            if board.field[i][j] == 2:
                pygame.draw.circle(display, BLUE, (int(xc), int(yc)), int(CELL_SIZE * 2 / 5), 0)
            # thin outline drawn on every cell, filled or not
            pygame.draw.circle(display, BLACK, (int(xc), int(yc)), int(CELL_SIZE * 2 / 5), 1)
    # display players' score
    red_score_surf = gamefont.render('RED: ' + str(score_red), False, RED)
    blue_score_surf = gamefont.render('BLUE: ' + str(score_blue), False, BLUE)
    score_x = OFFSET_CANVAS
    score_y = 2 * OFFSET_CANVAS + TOP_OFFSET + board.height * CELL_SIZE
    display.blit(red_score_surf, (score_x, score_y))
    display.blit(blue_score_surf, (score_x, score_y + FONTSIZE))
    # potentially display arrow (only during a live human turn over a column)
    if selected_index >= 0 and game_running and player_turn:
        draw_arrow(display, selected_index)
    # is it the AI player's turn?
    if game_running:
        if red_turn:
            thinking_surf = gamefont.render("Red playing...", False, RED)
        else:
            thinking_surf = gamefont.render("Blue playing...", False, BLUE)
        display.blit(thinking_surf, (OFFSET_CANVAS + 3 * CELL_SIZE, 2 * OFFSET_CANVAS + TOP_OFFSET + board.height * CELL_SIZE))
    if not(game_running):
        draw_winners(display, gamefont, winner)
    pygame.display.update()
def draw_winners(display, gamefont, winner):
    """Render the end-of-game banner (draw / red wins / blue wins) top-left."""
    # winner: 0 = draw, 1 = red, anything else = blue (matches the original).
    banners = {0: ("DRAW!", BLACK), 1: ("RED WINS!", RED)}
    text, colour = banners.get(winner, ("BLUE WINS!", BLUE))
    banner_surf = gamefont.render(text, False, colour)
    display.blit(banner_surf, (OFFSET_CANVAS, OFFSET_CANVAS / 2))
# check which column is being hovered
def hovered_col(board):
    """Map the current mouse position to a board column index, or -1 if outside the grid."""
    (mouse_x, mouse_y) = pygame.mouse.get_pos()
    left = OFFSET_CANVAS
    right = OFFSET_CANVAS + board.width * CELL_SIZE
    top = OFFSET_CANVAS + TOP_OFFSET
    bottom = top + board.height * CELL_SIZE
    # x is half-open [left, right); y is closed [top, bottom], as originally.
    inside = left <= mouse_x < right and top <= mouse_y <= bottom
    if not inside:
        # `-1` is the indicator that nothing has been selected
        return -1
    return int((mouse_x - left) / CELL_SIZE)
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFour1Player.py | <reponame>isswaroop/Alpha-Beta-Pruning-For-Connect-Four
import ConnectFourRandomAI
import ConnectFourMinimaxAI
import AlphaBetaAI
import ConnectFourEngine
if __name__ == '__main__':
    # Initialise the game engine
    # Modify these parameters to tweak the game
    # red_player is left unset (None) -> red is the human (mouse input);
    # blue is driven by the alpha-beta AI with a 20 ms minimum move delay.
    app = ConnectFourEngine.ConnectFour(
            ai_delay = 20,
            #red_player = ConnectFourMinimaxAI.AIcheck, #None,
            blue_player = AlphaBetaAI.AIcheck #ConnectFourMinimaxAI.AIcheck ,
            )
    # start the game engine
    app.game_loop()
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFourEngine.py | # import all of the necessary libraries
import pygame, sys
from pygame.locals import *
# import helper libraries
import ConnectFourGraphics
import ConnectFourBoard
class ConnectFour:
    """Connect-Four game engine.

    Owns the board, the pygame window, the optional AI callbacks for each
    colour, and the turn/score bookkeeping. A player slot set to ``None``
    means that colour is controlled by the human via the mouse.
    """

    def __init__(self,
                 height = 9, width = 9,
                 rewards = None,
                 winscore = 100,
                 red_player = None,
                 blue_player = None,
                 ai_delay = 60
                 ):
        """Initialise pygame, the board, the display and the game state.

        *ai_delay* is the minimum duration (ms) of an AI turn, so that AI
        moves stay visible to a spectator.
        """
        ## initialise pygame
        pygame.init()
        pygame.font.init()
        ## board
        self.board = ConnectFourBoard.EmptyBoard(height, width, rewards, winscore)
        ## interface
        self.selected_index = -1  # hovered column; -1 means none
        self.display = ConnectFourGraphics.setup_display(self.board)
        ### PLAYER SETTINGS ###
        # Each player is either None (human) or callable(board, token, red_turn) -> column.
        self.red_player = red_player
        self.blue_player = blue_player
        self.ai_delay = ai_delay
        self.time_cumulative = {ConnectFourBoard.RED : 0, ConnectFourBoard.BLUE : 0}
        ## state of the game (scoreboard, whose turn it is, etc.)
        self.score_red = 0
        self.score_blue = 0
        self.winner = 0  # 0 until the game ends; then RED, BLUE or 0 for a tie
        self.game_running = True
        self.red_turn = True
        ## draw initial board
        self.draw()

    def human_turn(self):
        """Return True when the colour to move has no AI callback (human player)."""
        if self.red_turn and self.red_player is None:
            # It's red's turn and red's human
            return True
        elif (not self.red_turn) and self.blue_player is None:
            # It's blue's turn and blue's human
            return True
        else:
            return False

    def draw(self):
        """Wrapper around ConnectFourGraphics.draw_board picking the right fields of self."""
        ConnectFourGraphics.draw_board(self.display, self.board,
                                       self.score_red, self.score_blue,
                                       self.selected_index, self.game_running,
                                       self.human_turn(), self.red_turn, self.winner)

    def turn_token(self):
        """Token (RED or BLUE) of the player whose turn it currently is."""
        if self.red_turn:
            return ConnectFourBoard.RED
        else:
            return ConnectFourBoard.BLUE

    def attempt_insert(self, col):
        """Current player tries to drop a token into *col*.

        On success: refresh scores, check for a finished game, and pass the
        turn. On failure nothing happens, forcing the player to choose again.
        """
        token = self.turn_token()
        success = self.board.attempt_insert(col, token)
        if success:
            (self.score_red, self.score_blue) = self.board.score()
            if self.win_check():
                self.set_winner()
            self.red_turn = not(self.red_turn)

    def game_loop(self):
        """Main loop: alternate AI/human moves, process events, redraw.

        Once the game finishes this blocks waiting for the QUIT event and
        then exits the process.
        """
        while self.game_running:
            # Let the AI play if it's its turn
            if not self.human_turn():
                start_ai_time = pygame.time.get_ticks()
                token = self.turn_token()
                if token == ConnectFourBoard.RED:
                    move = self.red_player(self.board, token, self.red_turn)
                elif token == ConnectFourBoard.BLUE:
                    move = self.blue_player(self.board, token, self.red_turn)
                self.attempt_insert(move)
                stop_ai_time = pygame.time.get_ticks()
                ai_time_span = stop_ai_time - start_ai_time
                self.time_cumulative[token] = self.time_cumulative[token] + ai_time_span
                if ai_time_span < self.ai_delay:
                    # pad fast AI moves so they remain visible
                    pygame.time.delay(self.ai_delay - ai_time_span)
            # Process all events, especially mouse events.
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit(0)
                if event.type == MOUSEMOTION:
                    self.selected_index = ConnectFourGraphics.hovered_col(self.board)
                if event.type == MOUSEBUTTONDOWN and event.button == 1:
                    if self.human_turn():
                        self.attempt_insert(self.selected_index)
            # Refresh the display and loop back
            self.draw()
            pygame.time.wait(40)
        # Once the game is finished, simply wait for the QUIT event
        while True:
            event = pygame.event.wait()
            if event.type == QUIT:
                pygame.quit()
                sys.exit(0)
            pygame.time.wait(60)

    def refresh_scores(self):
        """Recompute both players' scores from the board.

        Fixed: the original read a bare ``board`` name — a NameError at call
        time — instead of ``self.board``.
        """
        (red, blue) = self.board.score()
        self.score_red = red
        self.score_blue = blue

    # is the game finished?
    # return True if that is the case otherwise return False
    def win_check(self):
        """True when either score reached the configured winning score or the board is full."""
        # Use the board's configured threshold instead of a hard-coded 100,
        # so the `winscore` constructor argument actually takes effect.
        win_score = self.board.winscore
        red_win = self.score_red >= win_score
        blue_win = self.score_blue >= win_score
        full_board = all([self.board.col_height(n) == self.board.height for n in range(self.board.width)])
        return red_win or blue_win or full_board

    def set_winner(self):
        """Stop the game and record the winner (0 on a tie)."""
        self.game_running = False
        if self.score_red > self.score_blue:
            self.winner = ConnectFourBoard.RED
        elif self.score_red < self.score_blue:
            self.winner = ConnectFourBoard.BLUE
        else:
            self.winner = 0
|
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFour0Player.py | <reponame>isswaroop/Alpha-Beta-Pruning-For-Connect-Four<gh_stars>0
import ConnectFourRandomAI
import ConnectFourMinimaxAI
import AlphaBetaAI
import ConnectFourEngine
if __name__ == '__main__':
    # Initialise the game engine
    # Intended as four back-to-back AI-vs-AI games with no AI delay.
    # NOTE(review): game_loop() never returns — after one game it blocks on a
    # QUIT wait and then sys.exit()s — so iterations 2-4 never start; confirm
    # whether multiple games were actually intended.
    for i in range(4):
        app = ConnectFourEngine.ConnectFour(
            ai_delay = 0,
            red_player = AlphaBetaAI.AIcheck,
            blue_player = AlphaBetaAI.AIcheck,
            )
        # start the game engines
app.game_loop() |
isswaroop/Alpha-Beta-Pruning-For-Connect-Four | ConnectFourBoard.py | <filename>ConnectFourBoard.py<gh_stars>0
import copy
# Board tokens: cell values stored in Board.field (0 means empty).
RED = 1
BLUE = 2
# A few helper functions to manage board initialisation
def new_empty_board(height, width):
    """Build a width x height grid of zeros, stored column-major (one list per column)."""
    return [[0 for _ in range(height)] for _ in range(width)]
def valid_board(board):
    """Check that *board* is a non-empty rectangle; raise ValueError otherwise.

    Returns None on success. Called by Board.__init__ to reject malformed
    grids before any game state is built on top of them.
    """
    # Fixed: the original raised `InvalidBoard`, a name that is not defined
    # anywhere in this module — so the failure path itself crashed with a
    # NameError. ValueError keeps the "invalid input" signal intact.
    if len(board) == 0:
        raise ValueError('The board has no space')
    l = len(board[0])
    if any(len(col) != l for col in board):
        raise ValueError('Not all columns have the same heights')
    if l == 0:
        raise ValueError('The board has no space')
class Board():
    """Column-major Connect-Four board: ``field[col][row]`` with row 0 at the bottom.

    Cell values: 0 = empty, 1 = red, 2 = blue. ``rewards[k]`` is the score
    awarded for a run of ``k + 1`` aligned tokens; ``winscore`` is the total
    a player needs to win.
    """

    def __init__(self, board=None, rewards=None, winscore=100):
        if board is None:
            # If no board is passed explicitly, just create one
            board = new_empty_board(8, 9)
        self.field = board
        # Raises if the provided board is not a proper non-empty rectangle.
        valid_board(self.field)
        self.width = len(self.field)
        self.height = len(self.field[0])
        if rewards is None:
            # The default rewards: [0, 1, 2, 4, 8, 16, 32, etc. ]
            rewards = [0] + [ 2 ** (n - 1) for n in range(1, max(self.width, self.height)) ]
        self.rewards = rewards
        self.winscore = winscore

    def col_height(self, col):
        """Number of tokens already stacked in column *col*."""
        return sum(1 for space in self.field[col] if space != 0)

    def not_full_columns(self):
        """Columns that can still accept a token (the playable moves; useful for AIs)."""
        return [col for col in range(self.width) if self.col_height(col) < self.height]

    def attempt_insert(self, col, token):
        """Drop *token* into *col* if there is room; return True on success, False otherwise."""
        if self.col_height(col) < self.height:
            # add a token on top of the stack in the selected column
            self.field[col][self.col_height(col)] = token
            return True
        else:
            return False

    def scoreGeneral(self, dirX, dirY, token):
        """Total reward for all runs of *token* along direction (dirX, dirY).

        Walks each cell at most once per call (visited cells are remembered
        in *used*) and adds rewards[length - 1] for every run of length >= 2.
        """
        used = set()  # was a list: membership tests were O(n) per cell
        score = 0
        for row in range(self.height):
            inRun = False
            run_length = 0
            for col in range(self.width):
                colcol = col
                rowrow = row
                if (colcol, rowrow) not in used:
                    while colcol >= 0 and rowrow < self.height and colcol < self.width:
                        used.add((colcol, rowrow))
                        # '==' instead of 'is': identity comparison of ints only
                        # worked by accident of CPython's small-int caching.
                        if self.field[colcol][rowrow] == token and not inRun:
                            inRun = True
                        if self.field[colcol][rowrow] == token:
                            run_length += 1
                        else:
                            inRun = False
                            if run_length > 1:
                                score += self.rewards[run_length - 1]
                            run_length = 0
                        colcol += dirX
                        rowrow += dirY
                    # flush a run that reached the edge of the board
                    if inRun and run_length > 1:
                        score += self.rewards[run_length - 1]
                        inRun = False
                        run_length = 0
        return score

    def score(self):
        """Return (red_score, blue_score): each colour's total over the four line directions."""
        directions = ((-1, 1), (1, 0), (1, 1), (0, 1))
        red = sum(self.scoreGeneral(dx, dy, 1) for (dx, dy) in directions)
        blue = sum(self.scoreGeneral(dx, dy, 2) for (dx, dy) in directions)
        return (red, blue)

    def refresh_scores(self):
        """Recompute and return the (red, blue) score tuple.

        Fixed: the original called ``self.score(self.field_state)`` — ``score``
        takes no arguments and ``field_state`` never exists — so every call
        raised, and the result was discarded anyway.
        """
        return self.score()

    def is_full(self):
        """True when every column has reached the board height."""
        return all(self.col_height(n) == self.height for n in range(self.width))
# This additional class simply creates an empty board of a given size.
# Note the `Board` between brackets (`(` and `)`). This means that the methods
# from the class `Board` are available in the class `EmptyBoard`. In other
# words, `EmptyBoard` is just a special case of the general case `Board`.
class EmptyBoard(Board):
    """A Board of the given size with every cell empty.

    Just a convenience constructor: builds a zeroed grid and hands it to the
    base class.
    """
    # Function to set up the objects of this class
    def __init__(self, height=8, width=9, rewards=None, winscore=100):
        # Create a simple empty board with the right height and width
        fresh_board = new_empty_board(height, width)
        # Delegate the rest of the set-up to the base class. super() instead
        # of the original hard-coded ``Board.__init__(self, ...)`` call, which
        # bypasses the MRO and breaks under multiple inheritance.
        super().__init__(fresh_board, rewards, winscore)
NetEase-FuXi/EET | example/python/bert_transformers_example.py | import torch
import numpy as np
from eet.transformers.modeling_bert import EETBertModel
from transformers import BertModel
import time
using_half = True  # run in fp16 when True, fp32 otherwise
seq_len = 128      # tokens per sequence
batch = 4          # sequences per forward pass
loop = 100         # timed iterations per model
def main():
    """Benchmark EET's BERT against HuggingFace Transformers on random token ids."""
    torch.set_grad_enabled(False)
    token_ids = np.random.randint(1000,9000,seq_len * batch,dtype="int64")
    input_ids = torch.from_numpy(token_ids).long().reshape(batch, seq_len).cuda()
    data_type = torch.float32
    if using_half:
        data_type = torch.float16
    eet_model = EETBertModel.from_pretrained('bert-base-uncased',max_batch = batch,data_type = data_type)
    ts_model = BertModel.from_pretrained('bert-base-uncased').cuda()
    if using_half:
        # Fixed: the baseline was unconditionally .half(), so with
        # using_half=False a float32 EET run was timed against a float16
        # Transformers run — an apples-to-oranges speedup number.
        ts_model = ts_model.half()
    attention_mask = None
    # --- EET timing ---
    t1 = time.perf_counter()
    for i in range(loop):
        res_eet = eet_model(input_ids, attention_mask=attention_mask)
    t2 = time.perf_counter()
    time_eet = t2 - t1
    print('Time for EET : ', time_eet)
    # --- Transformers timing ---
    t3 = time.perf_counter()
    for i in range(loop):
        res_ts = ts_model(input_ids,attention_mask)
    t4= time.perf_counter()
    time_ts = t4 -t3
    print('Time for Transformers: ', time_ts)
    print('SpeedUp is ', time_ts / time_eet)
if __name__ == '__main__':
    main()
|
NetEase-FuXi/EET | example/python/gpt2_fairseq_example.py | <filename>example/python/gpt2_fairseq_example.py
import torch
import time
import numpy as np
from torch import nn
from fairseq.data.dictionary import Dictionary
from eet.fairseq.transformer import EETTransformerDecoder
# NOTE(review): using_pytorch is never read below — confirm it is dead.
using_pytorch = True
using_eet = True
using_half = False   # fp32 by default; set True to run EET in fp16
prompt_seq_len = 4   # length of the initial prompt fed in a single pass
# eet supports a maximum seq_len of 4096
max_seq_len = 1024   # total sequence length decoded per run
batch = 4
class Args(object):
    """Minimal stand-in for the fairseq argument namespace consumed by the decoder.

    Only decoder-related fields are modelled; ``fp16`` is pinned to False
    here because precision is controlled through the EET config instead.
    Requires ``decoder_embed_dim == decoder_output_dim``.
    """
    def __init__(self,
                 decoder_layerdrop,
                 share_decoder_input_output_embed,
                 decoder_embed_dim,
                 decoder_output_dim,
                 max_target_positions,
                 no_scale_embedding,
                 decoder_learned_pos,
                 no_token_positional_embeddings,
                 decoder_normalize_before,
                 decoder_layers,
                 decoder_attention_heads,
                 decoder_ffn_embed_dim,
                 adaptive_softmax_cutoff=None,
                 dropout=0.1,
                 attention_dropout=0.1,
                 activation_fn='relu',
                 adaptive_input=False,
                 quant_noise_pq=0
                 ):
        super().__init__()
        self.decoder_layerdrop = decoder_layerdrop
        self.share_decoder_input_output_embed = share_decoder_input_output_embed
        self.decoder_embed_dim = decoder_embed_dim
        self.decoder_output_dim = decoder_output_dim
        self.max_target_positions = max_target_positions
        self.no_scale_embedding = no_scale_embedding
        self.decoder_learned_pos = decoder_learned_pos
        self.no_token_positional_embeddings = no_token_positional_embeddings
        self.decoder_normalize_before = decoder_normalize_before
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_embed_dim = decoder_ffn_embed_dim
        self.adaptive_softmax_cutoff = adaptive_softmax_cutoff
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_fn = activation_fn
        self.fp16 = False
        self.adaptive_input = adaptive_input
        self.quant_noise_pq = quant_noise_pq
        # Explicit error instead of `assert`, which is silently stripped
        # under `python -O` and would let an inconsistent config through.
        if self.decoder_embed_dim != self.decoder_output_dim:
            raise ValueError('decoder_embed_dim must equal decoder_output_dim')
# 6-layer, 8-head decoder with embed dim 512, FFN 2048, up to 1024 positions.
args = Args(0, True, 512, 512, 1024, False, False, False, False, 6, 8, 2048, None, 0.1, 0.1)
# NOTE(review): this embedding table appears unused below — presumably left
# over from an earlier variant; verify before removing.
embedding = nn.Embedding(13672, 512, padding_idx=1)
dictionary = Dictionary.load('../../resource/data/dict.txt')
def main():
    """Incremental-decoding benchmark for the EET transformer decoder.

    Feeds a random prompt once, then one token per step with cached state,
    exercising dynamic batch reordering via ``reorder_state``.
    """
    model_id_or_path = '../../resource/model/checkpoint_best.pt'
    torch.set_grad_enabled(False)
    # (Dropped: an unused `torch.load(model_id_or_path)` plus an unused
    # `model_dict = {}` — from_torch below loads the checkpoint itself, so
    # the extra load was pure wasted I/O.)
    # Random token ids within the dictionary's normal-symbol range.
    tokens = np.random.randint(3,13672,max_seq_len * batch,dtype="int64")
    tokens = torch.from_numpy(tokens).long().reshape(batch, max_seq_len).cuda()
    # tokens[2:4,0:2] = 1
    if using_eet:
        data_type = torch.float32
        if using_half:
            data_type = torch.float16
        eet_config = {"data_type":data_type,"max_batch":batch,"full_seq_len":prompt_seq_len}
        eet_model = EETTransformerDecoder.from_torch(model_id_or_path = model_id_or_path,dictionary = dictionary,args = args,config = eet_config,no_encoder_attn = True)
    total_time_eet = 0
    first_pass = True
    reorder_state = None
    for step in range(prompt_seq_len-1, max_seq_len):
        print('step:',step)
        torch.cuda.synchronize()
        t1 = time.perf_counter()
        if first_pass:
            # Prompt phase: the whole prefix in one pass.
            input_ids_eet = torch.clone(tokens[:, :step + 1].contiguous()).cuda().long()
        else:
            # Generation phase: one token per step, reusing cached state.
            input_ids_eet = torch.clone(tokens[:, step:step + 1].contiguous()).cuda().long()
        res_eet = eet_model(input_ids_eet, reorder_state = reorder_state,first_pass = first_pass)
        torch.cuda.synchronize()
        t2 = time.perf_counter()
        print('eet time : ', t2 - t1)
        total_time_eet += (t2 - t1)
        # eet support dynamic batch according to the reorder_state
        reorder_state = torch.tensor([1,0,2,3]).cuda()
        tokens[:, : step + 1] = torch.index_select(
            tokens[:, : step + 1], dim=0, index=reorder_state
        )
        if first_pass == True:
            first_pass = False
    print('total time for eet : ', total_time_eet)
if __name__ == '__main__':
    main()
|
NetEase-FuXi/EET | example/python/gpt2_transformers_example.py | <reponame>NetEase-FuXi/EET
import torch
import numpy as np
from eet.transformers.modeling_gpt2 import EETGPT2Model
using_half = False  # fp32 by default; set True to run in fp16
seq_len = 128       # prompt (full-decoder) length
batch = 5           # sequences per forward pass
def main():
    """Drive the EET GPT-2 model: one prompt pass, then 99 single-token steps."""
    prompt_tokens = np.random.randint(1000, 9000, seq_len * batch, dtype="int64")
    step_tokens = np.random.randint(1000, 9000, 1 * batch, dtype="int64")
    # prompt context, then the per-step prediction input
    input_full_decoder = torch.from_numpy(prompt_tokens).long().reshape(batch, seq_len).cuda()
    input_inc_decoder = torch.from_numpy(step_tokens).long().reshape(batch, 1).cuda()
    data_type = torch.float16 if using_half else torch.float32
    # load pytorch model into EET
    eet_model = EETGPT2Model.from_pretrained('gpt2', max_batch=batch, full_seq_len=seq_len, data_type=data_type)
    input_ids = input_full_decoder
    first_pass = True
    for i in range(100):
        print('i--:', i)
        res_eet = eet_model(input_ids, first_pass=first_pass)
        if first_pass:
            # after the prompt pass, switch to one-token incremental inputs
            first_pass = False
            input_ids = input_inc_decoder


if __name__ == '__main__':
    main()
|
NetEase-FuXi/EET | python/eet/transformers/__init__.py | <filename>python/eet/transformers/__init__.py
from .modeling_bert import *
from .modeling_gpt2 import *
|
NetEase-FuXi/EET | setup.py | from setuptools import find_packages, setup, Extension
from torch.utils import cpp_extension
import glob
import os
import subprocess
__version__ = "0.0.1"
current_dir = os.path.dirname(os.path.abspath(__file__))
# The extension is built from three groups: CUDA kernels, C++ op wrappers,
# and the pybind11 bindings.
cuda_sources = glob.glob(os.path.join(current_dir, 'csrc', 'core', '*.cu'))
cpp_sources = glob.glob(os.path.join(current_dir, 'csrc', 'op', '*.cpp'))
py11_sources = glob.glob(os.path.join(current_dir, 'csrc', 'py11', '*.cpp'))
sources = cuda_sources + cpp_sources + py11_sources
# Compiler include path = PyTorch's CUDA headers + this repo's csrc/ tree.
cuda_include_paths = cpp_extension.include_paths(cuda=True)
self_include_paths = [os.path.join(current_dir, 'csrc')]
include_paths = cuda_include_paths + self_include_paths
def get_cuda_bare_metal_version(cuda_dir):
    """Parse ``nvcc -V`` output; return (raw_output, major, minor) as strings.

    NOTE(review): ``release[1][0]`` keeps only the first character of the
    minor component — for "11.2," that conveniently also drops the trailing
    comma, but a two-digit minor such as "11.10" would be truncated to "1".
    Only the major version is consumed below, so confirm whether this matters.
    """
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]
    return raw_output, bare_metal_major, bare_metal_minor
# Choose the target GPU architectures from the CUDA toolkit major version
# (sm_80 / Ampere requires CUDA 11).
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
if int(bare_metal_major) == 11:
    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
setup(
    name='EET',
    version=__version__,
    package_dir={"": "python"},
    packages=find_packages("python"),
    ext_modules=[
        cpp_extension.CUDAExtension(
            name='EET',
            sources=sources,
            include_dirs=include_paths,
            # Undefine the "no half operators" guards so fp16 arithmetic
            # compiles inside the CUDA kernels.
            extra_compile_args={'cxx': ['-g'],
                                'nvcc': ['-U__CUDA_NO_HALF_OPERATORS__',
                                         '-U__CUDA_NO_HALF_CONVERSIONS__',
                                         '-U__CUDA_NO_HALF2_OPERATORS__']},
            define_macros=[('VERSION_INFO', __version__)]
        )
    ],
    cmdclass={
        'build_ext': cpp_extension.BuildExtension}
)
|
NetEase-FuXi/EET | example/python/gpt2_from_buffer_example.py | <reponame>NetEase-FuXi/EET
import torch
import time
import numpy as np
from torch import nn
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerDecoder
from eet.fairseq.transformer import EETTransformerDecoder
import sys
# Benchmark geometry: a context_len-token prompt decoded out to max_seq_len.
context_len = 512
batch = 4
max_seq_len = 1024
class Args(object):
    """Minimal stand-in for the fairseq argument namespace consumed by the decoder.

    Only decoder-related fields are modelled; ``fp16`` is pinned to False
    here because precision is controlled through the EET config instead.
    Requires ``decoder_embed_dim == decoder_output_dim``.
    """
    def __init__(self,
                 decoder_layerdrop,
                 share_decoder_input_output_embed,
                 decoder_embed_dim,
                 decoder_output_dim,
                 max_target_positions,
                 no_scale_embedding,
                 decoder_learned_pos,
                 no_token_positional_embeddings,
                 decoder_normalize_before,
                 decoder_layers,
                 decoder_attention_heads,
                 decoder_ffn_embed_dim,
                 adaptive_softmax_cutoff=None,
                 dropout=0.1,
                 attention_dropout=0.1,
                 activation_fn='relu',
                 adaptive_input=False,
                 quant_noise_pq=0
                 ):
        super().__init__()
        self.decoder_layerdrop = decoder_layerdrop
        self.share_decoder_input_output_embed = share_decoder_input_output_embed
        self.decoder_embed_dim = decoder_embed_dim
        self.decoder_output_dim = decoder_output_dim
        self.max_target_positions = max_target_positions
        self.no_scale_embedding = no_scale_embedding
        self.decoder_learned_pos = decoder_learned_pos
        self.no_token_positional_embeddings = no_token_positional_embeddings
        self.decoder_normalize_before = decoder_normalize_before
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_embed_dim = decoder_ffn_embed_dim
        self.adaptive_softmax_cutoff = adaptive_softmax_cutoff
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_fn = activation_fn
        self.fp16 = False
        self.adaptive_input = adaptive_input
        self.quant_noise_pq = quant_noise_pq
        # Explicit error instead of `assert`, which is silently stripped
        # under `python -O` and would let an inconsistent config through.
        if self.decoder_embed_dim != self.decoder_output_dim:
            raise ValueError('decoder_embed_dim must equal decoder_output_dim')
#1280 -- hidden_units
#16 -- layer num
# 24-layer, 16-head decoder with embed dim 1024, FFN 4096, normalize-before.
args = Args(0, True, 1024, 1024, max_seq_len, False, False, False, True, 24, 16, 1024 * 4, None, 0.1, 0.1)
# Token embedding table shared with the fairseq decoder built in main().
embedding = nn.Embedding(13672, 1024, padding_idx=1)
dictionary = Dictionary.load('resource/dict.txt')
def main():
    """Time EET's decoder (built from a live fairseq decoder's buffers) against fairseq itself."""
    torch.set_grad_enabled(False)
    tokens = np.random.randint(3,13672,max_seq_len * batch,dtype="int64")
    tokens = torch.from_numpy(tokens).long().reshape(batch, max_seq_len).cuda()
    # Reference fairseq decoder in fp16; EET wraps its weight buffers directly.
    torch_decoder = TransformerDecoder(args, dictionary, embedding, True).cuda().half().eval()
    eet_config = {"data_type":torch.float16,"max_batch":batch,"full_seq_len":context_len}
    eet_model = EETTransformerDecoder.from_buffer(torch_decoder = torch_decoder,dictionary = dictionary,args = args,config = eet_config,no_encoder_attn = True)
    # --- EET timing: 100 decodes, prompt pass then one token per step ---
    torch.cuda.synchronize()
    t1 = time.perf_counter()
    for i in range(100):
        first_pass = True
        reorder_state = None
        for step in range(context_len - 1, max_seq_len):
            if first_pass:
                input_ids_eet = tokens[:, :step + 1].contiguous().cuda().long()
            else:
                input_ids_eet = tokens[:, step:step + 1].contiguous().cuda().long()
            res_eet = eet_model(input_ids_eet,reorder_state=reorder_state, first_pass = first_pass)
            first_pass = False
    torch.cuda.synchronize()
    t2 = time.perf_counter()
    print('Time for EET : ', t2 - t1)
    # --- Fairseq timing: same workload through incremental decoding ---
    # NOTE(review): this loop starts from step 0 (not context_len - 1 as the
    # EET loop does), so fairseq performs more steps per decode — confirm the
    # speed-up figure accounts for that.
    torch.cuda.synchronize()
    t3 = time.perf_counter()
    for i in range(100):
        incremental_state = {}
        for step in range(0, max_seq_len):
            res_torch, incremental_state = torch_decoder(tokens[:,:step+1], incremental_state=incremental_state)
    torch.cuda.synchronize()
    t4 = time.perf_counter()
    print('Time for Fairseq : ', t4 - t3)
    print('SpeedUp is : ', (t4 - t3)/(t2- t1))
if __name__ == '__main__':
    main()
|
NetEase-FuXi/EET | python/eet/transformers/modeling_bert.py | #
# Created by djz on 2021/01/21.
#
"""EET transformers bert model. """
import math
import time
import torch
import torch.nn as nn
import numpy as np
from torch import Tensor
from typing import Any, Dict, List, Optional, Tuple
from transformers import BertModel
from EET import MetaDesc as meta_desc
from EET import FeedForwardNetwork as eet_ffn
from EET import MultiHeadAttention as eet_attention
from EET import Embedding as eet_embedding
BEGIN_OF_PARAM = 8
__all__ = [
'EETBertEmbedding', 'EETBertFeedforward', 'EETBertAttention', 'EETBertEncoderLayer', 'EETBertEncoder', 'EETBertModel'
]
class EETBertEmbedding():
    """EET-accelerated BERT embedding (word + position + token-type lookups
    followed by LayerNorm), built from the torch embedding state-dict slice."""
    def __init__(self, config, embedding_dict, data_type=torch.float32):
        # Move each embedding parameter to the GPU in the requested precision.
        def _to_gpu(key):
            return embedding_dict[key].cuda().type(data_type)
        self.if_layernorm = True
        self.embedding_weights = _to_gpu('embeddings.word_embeddings.weight')
        self.position_weights = _to_gpu('embeddings.position_embeddings.weight')
        self.token_type_weights = _to_gpu('embeddings.token_type_embeddings.weight')
        self.Layernorm_weights = _to_gpu('embeddings.LayerNorm.weight')
        self.Layernorm_bias = _to_gpu('embeddings.LayerNorm.bias')
        self.embedding = eet_embedding(config, self.embedding_weights,
                                       self.position_weights, self.token_type_weights,
                                       self.Layernorm_weights, self.Layernorm_bias)
    def __call__(self, input_ids, position_ids, token_type_ids):
        """Run the fused embedding kernel over the three id tensors."""
        return self.embedding.forward_transformers(
            input_ids, position_ids, token_type_ids, self.if_layernorm)
    @staticmethod
    def from_torch(config, embedding_dict, data_type=torch.float32):
        """Alternate constructor, mirroring the other EET wrapper classes."""
        return EETBertEmbedding(config, embedding_dict, data_type=data_type)
class EETBertFeedforward():
    """EET-accelerated BERT feed-forward block for one encoder layer.

    Pulls the intermediate/output Linear weights plus the output LayerNorm
    out of a per-layer state-dict slice and hands them to the fused kernel.
    """
    def __init__(self,config,model_dict,layer_id,data_type = torch.float32):
        # torch.t(): nn.Linear stores weights as [out, in]; the transposed
        # [in, out] layout is presumably what eet_ffn expects -- it matches
        # the treatment of out_weights in EETBertAttention.
        self.intermediate_weights = torch.t([x[1] for x in model_dict.items() if 'intermediate.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
        self.intermediate_bias = [x[1] for x in model_dict.items() if 'intermediate.dense.bias' in x[0]][0].cuda().type(data_type)
        # The str(layer_id) prefix ('<N>.output.dense.weight') is what keeps
        # this from also matching 'attention.output.dense.weight' in the same
        # layer slice; 'intermediate' above is unique so it needs no prefix.
        self.output_weights = torch.t([x[1] for x in model_dict.items() if str(layer_id)+'.output.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
        self.output_bias = [x[1] for x in model_dict.items() if str(layer_id)+'.output.dense.bias' in x[0]][0].cuda().type(data_type)
        self.layernorm_weights = [x[1] for x in model_dict.items() if str(layer_id)+'.output.LayerNorm.weight' in x[0]][0].cuda().type(data_type)
        self.layernorm_bias = [x[1] for x in model_dict.items() if str(layer_id)+'.output.LayerNorm.bias' in x[0]][0].cuda().type(data_type)
        self.ffn = eet_ffn(config,self.intermediate_weights,self.intermediate_bias,self.output_weights,self.output_bias,self.layernorm_weights,self.layernorm_bias)
    def __call__(self,
                input_id,
                pre_layernorm = True,
                add_redusial = True):
        # add_redusial (sic, "residual"): spelling kept -- it is part of the
        # call interface used by EETBertEncoderLayer.
        return self.ffn.forward(input_id,pre_layernorm,add_redusial)
    @staticmethod
    def from_torch(config,model_dict,layer_id,data_type = torch.float32):
        """Build a feed-forward block for *layer_id* from a state-dict slice."""
        feedforward = EETBertFeedforward(config,model_dict,layer_id,data_type = data_type)
        return feedforward
class EETBertAttention():
    """EET-accelerated BERT self-attention block for one encoder layer.

    Fuses the Q/K/V projection weights into a single matrix for the kernel.
    """
    def __init__(self,config, model_dict,layer_id,data_type = torch.float32):
        q_weights = [x[1] for x in model_dict.items() if 'self.query.weight' in x[0]][0].contiguous().cuda().type(data_type)
        k_weights = [x[1] for x in model_dict.items() if 'self.key.weight' in x[0]][0].contiguous().cuda().type(data_type)
        v_weights = [x[1] for x in model_dict.items() if 'self.value.weight' in x[0]][0].contiguous().cuda().type(data_type)
        # Each projection is [hidden, hidden] (nn.Linear [out, in]); cat on
        # dim 0 gives [3*hidden, hidden], transpose yields the fused
        # [hidden, 3*hidden] Q|K|V matrix consumed by eet_attention.
        self.qkv_weight = torch.cat((q_weights,k_weights,v_weights),0).transpose(0,1).contiguous()
        self.q_bias = [x[1] for x in model_dict.items() if 'self.query.bias' in x[0]][0].cuda().type(data_type)
        self.k_bias = [x[1] for x in model_dict.items() if 'self.key.bias' in x[0]][0].cuda().type(data_type)
        self.v_bias = [x[1] for x in model_dict.items() if 'self.value.bias' in x[0]][0].cuda().type(data_type)
        # Output projection transposed to [in, out], like the FFN weights.
        self.out_weights = torch.t([x[1] for x in model_dict.items() if 'attention.output.dense.weight' in x[0]][0]).contiguous().cuda().type(data_type)
        self.out_bias = [x[1] for x in model_dict.items() if 'attention.output.dense.bias' in x[0]][0].cuda().type(data_type)
        self.layernorm_weights = [x[1] for x in model_dict.items() if 'attention.output.LayerNorm.weight' in x[0]][0].cuda().type(data_type)
        self.layernorm_bias = [x[1] for x in model_dict.items() if 'attention.output.LayerNorm.bias' in x[0]][0].cuda().type(data_type)
        self.attention = eet_attention(config,self.qkv_weight,self.q_bias,self.k_bias,self.v_bias,self.out_weights,self.out_bias,self.layernorm_weights,self.layernorm_bias)
    def __call__(self,
                input_id,
                pre_padding_len,
                pre_layernorm = False,
                add_redusial = True):
        # add_redusial (sic, "residual"): spelling kept -- part of the call
        # interface used by EETBertEncoderLayer.
        return self.attention.forward(input_id,pre_padding_len,pre_layernorm,add_redusial)
    @staticmethod
    def from_torch(config,model_dict,layer_id,data_type = torch.float32):
        """Build an attention block for *layer_id* from a state-dict slice."""
        attention = EETBertAttention(config,model_dict,layer_id,data_type = data_type)
        return attention
class EETBertEncoderLayer():
    """One BERT encoder block: fused self-attention followed by the fused FFN,
    each adding its residual (post-LayerNorm ordering when
    normalize_before is False)."""
    def __init__(self, config, attention, feedforward):
        # NOTE: attribute keeps the original spelling so external code that
        # may reach for layer.attetion keeps working.
        self.attetion = attention
        self.feedforward = feedforward
    def __call__(self, x, pre_padding_len=None, normalize_before=False):
        """Apply self-attention then feed-forward to *x* and return the result."""
        attn_out = self.attetion(
            input_id=x,
            pre_padding_len=pre_padding_len,
            pre_layernorm=normalize_before,
            add_redusial=True,
        )
        return self.feedforward(
            attn_out,
            pre_layernorm=normalize_before,
            add_redusial=True,
        )
    @staticmethod
    def from_torch(config, model_dict, layer_id, data_type=torch.float32):
        """Assemble one layer by loading its attention and FFN weights."""
        attn = EETBertAttention.from_torch(
            config=config, model_dict=model_dict, layer_id=layer_id, data_type=data_type)
        ffn = EETBertFeedforward.from_torch(
            config=config, model_dict=model_dict, layer_id=layer_id, data_type=data_type)
        return EETBertEncoderLayer(config, attn, ffn)
class EETBertEncoder():
    """Stack of EET BERT encoder layers applied sequentially."""
    def __init__(self, EncoderLayers):
        self.layers = EncoderLayers
    def __call__(
        self,
        x,
        pre_padding_len = None,
        normalize_before = False
    ):
        """Run *x* through every layer and return the final hidden states."""
        # Bug fix: normalize_before was hard-coded to False for every layer,
        # silently ignoring the caller's argument; forward it instead.
        for layer in self.layers:
            x = layer(x,
                      pre_padding_len=pre_padding_len,
                      normalize_before=normalize_before)
        return x
    @staticmethod
    def from_torch(layer_model_dict, config, layer_num, data_type=torch.float32):
        """Build one EETBertEncoderLayer per layer from the grouped state-dict.

        Keys of *layer_model_dict* are the first 8 characters of the stripped
        parameter names: 'layer.N.' for N < 10 but 'layer.NN' for N >= 10
        (see BEGIN_OF_PARAM in from_pretrained), hence the two spellings.
        """
        EncoderLayers = []
        for i in range(layer_num):
            key = 'layer.' + str(i) + '.' if i < 10 else 'layer.' + str(i)
            EncoderLayers.append(
                EETBertEncoderLayer.from_torch(config, layer_model_dict[key], i, data_type=data_type)
            )
        return EETBertEncoder(EncoderLayers)
class EETBertModel():
    """EET-accelerated BERT model: fused embedding followed by the fused
    encoder stack. Build from a pretrained Hugging Face checkpoint with
    :meth:`from_pretrained`."""
    def __init__(self, config, embedding, encoder):
        self.embedding = embedding
        self.encoder = encoder
        # Empty tensor is the EET convention for "no padding anywhere".
        self.pre_padding_len = torch.empty(0).long()
        # Default positions 0..max_position_embeddings-1, sliced per call.
        self.position_ids = torch.arange(0, config.max_position_embeddings).reshape(1, config.max_position_embeddings).cuda()
    def __call__(
        self,
        input_ids,
        position_ids = None,
        token_type_ids = None,
        attention_mask = None,
    ):
        '''
        attention_mask:attention_padding_mask(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on the padding token indices of the encoder input.)
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        '''
        input_shape = input_ids.size()
        # Bug fix: a caller-supplied position_ids used to be unconditionally
        # overwritten with the default arange positions; honor it now.
        if position_ids is None:
            position_ids = self.position_ids[:, :input_shape[1]]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=input_ids.device)
        if attention_mask is None:
            pre_padding_len = self.pre_padding_len
        else:
            # transformers convention: 1 = keep, 0 = padding. EET takes the
            # per-row count of padding tokens (presumably leading padding,
            # per the "pre_padding" name -- confirm against the kernel).
            pre_padding_len = torch.sum(1 - attention_mask, 1).long().cuda()
        embedding_out = self.embedding(input_ids, position_ids, token_type_ids)
        encoder_out = self.encoder(embedding_out,
                                   pre_padding_len=pre_padding_len,
                                   normalize_before=False)
        return encoder_out
    @staticmethod
    def from_pretrained(model_id_or_path: str, max_batch, data_type):
        """Load a Hugging Face BERT checkpoint and wrap it in EET kernels.

        Note: globally disables autograd as a side effect (inference only).
        """
        torch.set_grad_enabled(False)
        model_dict = {}
        embedding_dict = {}
        torch_model = BertModel.from_pretrained(model_id_or_path)
        cfg = torch_model.config
        for k, v in torch_model.state_dict().items():
            if 'embeddings' in k:
                embedding_dict[k] = v
            if 'layer' in k:
                # BEGIN_OF_PARAM strips the leading 'encoder.' prefix, e.g.
                # 'encoder.layer.0.attention.self.query.weight' -> 'layer.0....'
                k = k[BEGIN_OF_PARAM:]
                model_dict[k] = v
        from itertools import groupby
        # Group parameters per layer by the first 8 chars of the stripped key.
        # groupby only merges consecutive runs, which is safe here because the
        # state_dict preserves insertion order (all of a layer's params are
        # adjacent).
        layer_model_dict = {k: dict(v) for k, v in groupby(list(model_dict.items()), lambda item: item[0][:BEGIN_OF_PARAM])}
        device = "cuda:0"
        activation_fn = cfg.hidden_act
        batch_size = max_batch
        config = meta_desc(batch_size, cfg.num_attention_heads, cfg.hidden_size, cfg.num_hidden_layers, cfg.max_position_embeddings, cfg.max_position_embeddings, data_type, device, False, activation_fn)
        embedding = EETBertEmbedding.from_torch(config, embedding_dict, data_type)
        encoder = EETBertEncoder.from_torch(layer_model_dict, config, cfg.num_hidden_layers, data_type)
        eet_model = EETBertModel(cfg, embedding, encoder)
        return eet_model
|
y-labo/RealSenseViewer | rs_viewer.py | import pyrealsense2 as rs
import numpy as np
import cv2
def main():
    """Stream color + aligned depth from a RealSense camera until a key is pressed.

    Opens the depth (1024x768) and color (1920x1080) streams, aligns depth to
    the color frame, and shows both side by side, downscaled for display.
    """
    # RealSense L515 configuration
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    align_to = rs.stream.color
    align = rs.align(align_to)
    # Start streaming
    pipeline.start(config)
    window_name = "RealSense Viewer"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    resize_factor = 0.3
    # Main loop
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Compress 16-bit depth into 8 bits and colorize for display.
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            images = np.hstack((color_image, depth_colormap))
            height = images.shape[0]
            width = images.shape[1]
            images_resized = cv2.resize(
                images, (int(width * resize_factor), int(height * resize_factor)))
            cv2.imshow(window_name, images_resized)
            key = cv2.waitKey(1)
            # Bug fix: a key press used to call pipeline.stop() and keep
            # looping, so the next wait_for_frames() failed and the finally
            # block then stopped the pipeline a second time. Leave the loop
            # instead; cleanup happens exactly once below.
            if key != -1:
                break
    finally:
        pipeline.stop()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
|
scarelang/scarelang | scare/init.py | from pyparsing import *
import sys
# String data: double-quoted string where "" escapes an embedded quote,
# e.g. "Hello my name is ""Dan""" -> Hello my name is "Dan"
# (fixed: the QuotedString keyword argument is escQuote, not "escquoute")
sg = QuotedString('"', escQuote='""')
# Variable data
vars = {}
varname = ""
varvalue = ""
# Number data
real = Regex(r"[+-]?\d+\.\d*").setParseAction(lambda t: float(t[0]))
integer = Regex(r"[+-]?\d+").setParseAction(lambda t: int(t[0]))
# Symbol data
# (fixed: the braces were bound to lc/rc and then silently overwritten by the
# parentheses below; they now get their own names, while lc/rc keep their
# final parenthesis meaning so existing grammar rules are unchanged)
lbrace = Word("{", max=1)
rbrace = Word("}", max=1)
dot = Word(".", max=1)  # fixed: missing comma before max=1 (syntax error)
lc = Word("(", max=1)
rc = Word(")", max=1)
equ = Word("=", max=1)  # fixed: typo "Rord" -> Word
# Keywords
dim = Keyword("Dim")  # For defining variables
nothing = Keyword("Nothing")  # Nothing, same as nil
imports = Keyword("Imports")  # Import a scare script
use = Keyword("Use")  # Use a script. Same as import.
_if = Keyword("If")
system = Keyword("System")
# Console keywords
console = Keyword("Console")  # For console actions
writeline = Keyword("WriteLine")  # Write to console
# Parsing functioning
def imp(s, l, t):
    """Parse action for Imports/Use: return the referenced file's source.

    Parse actions receive (original string, match location, tokens); the
    filename is the matched quoted-string token, not the whole call tuple.
    """
    # fixed: open() was called as open(s, l, t), i.e. with the parse-action
    # arguments instead of the matched filename; also close the handle.
    with open(t[0]) as f:
        return f.read()
def write(s, l, t):
    """Parse action for Console.WriteLine: print the matched string."""
    # fixed: printing t dumped the raw token list instead of the string value
    print(t[0])
def setVarName(s, l, t):
    """Parse action: remember the name of the variable being defined by Dim."""
    # fixed: without the global declaration this assigned a function-local
    # that was discarded, so the following value action never saw the name;
    # also take the token value rather than the whole tokens object.
    global varname
    varname = t[0]
def setVarValue(s, l, t):
    """Parse action: bind the pending Dim variable name to the parsed value."""
    # fixed: the assignments to varname/varvalue below made them function
    # locals, so the `varname == ""` read raised UnboundLocalError; and the
    # stored value was the never-populated module-level varvalue instead of
    # the matched token.
    global varname, varvalue
    if varname == "":
        print("Error: invalid variable name.")
    else:
        vars[varname] = t[0]
        varname = ""
        varvalue = ""
def callVar(s, l, t):
    """Parse action: look up a variable's value by name, if it exists."""
    # fixed: vars[t] raised KeyError for unknown names (defeating the
    # intended fall-through) and used the whole token list as the key.
    name = t[0]
    if name in vars:
        return vars[name]
    else:
        print("next")
def editVar1(s, l, t):
    """Parse action: remember the name of the variable being reassigned."""
    # fixed: needs the global declaration and the token value, otherwise the
    # name was written to a discarded local.
    global varname
    varname = t[0]
def editVar2(s, l, t):
    """Parse action: store the parsed value into the variable being reassigned."""
    # fixed: same defects as the Dim value action -- local assignment made
    # varname unbound at the read above, and the never-set module-level
    # varvalue was stored instead of the matched token.
    global varname, varvalue
    if varname == "":
        print("Error: invalid variable name.")
    else:
        vars[varname] = t[0]
        varname = ""
        varvalue = ""
# Parse script
# fixed: sys.argv[0] is this interpreter script itself; the user's scare
# source file is the first real argument. Also close the file handle.
file = sys.argv[1]
with open(file, "r") as _src:
    file = _src.read()
# fixed: "word"/"SetParseAction" were capitalization typos for pyparsing's
# Word/setParseAction. sg is copied per grammar rule because setParseAction
# mutates the shared element (WRITELINE's action used to clobber IMPORTS').
IMPORTS = imports + sg.copy().setParseAction(imp)
WRITELINE = console + dot + writeline + lc + sg.copy().setParseAction(write) + rc
DEFINE_VAR = dim + Word(alphas).setParseAction(setVarName) + equ + Word(alphas).setParseAction(setVarValue)
CALL_VAR = Word(alphas).setParseAction(callVar)
SET_VAR = Word(alphas).setParseAction(editVar1) + equ + Word(alphas).setParseAction(editVar2)
# NOTE(review): nothing ever calls parseString/scanString on these rules, so
# the source is read but never actually parsed -- confirm the intended entry
# point.
|
plant-phenotyping/rpi-phenotyping | capture_image.py | #!/usr/bin/env python3
import RPi.GPIO as gp
import os
import socket
import datetime as dt
gp.setwarnings(False)
gp.setmode(gp.BOARD)
gp.setup(7, gp.OUT)
gp.setup(11, gp.OUT)
gp.setup(12, gp.OUT)
gp.output(11, True)
gp.output(12, True)
def main():
    """Select each camera channel in turn and capture a still from it."""
    # Pin pattern selecting camera 1 -- confirm against the adapter docs.
    gp.output(7, False)
    gp.output(11, False)
    gp.output(12, True)
    capture(1)
    # Pin pattern selecting camera 2.
    gp.output(7, False)
    gp.output(11, True)
    gp.output(12, False)
    capture(2)
def get_time_and_date(date_time):
    """Split a ``str(datetime.now())`` value into (date, time) parts.

    Returns the date unchanged ("YYYY-MM-DD") and the clock time with its
    colons replaced by dashes ("HH-MM-SS") so it is safe in a filename.
    """
    date, _, clock = date_time.partition(" ")
    new_time = clock.split(".")[0].replace(":", "-")
    return date, new_time
def capture(cam):
    """Capture a still from the currently selected camera via raspistill.

    The output is ~/Desktop/camera<cam>/z<zone>c<cam>--<date>--<time>.png,
    where the zone is the last character of the hostname.
    """
    date_time = str(dt.datetime.now())
    date, time = get_time_and_date(date_time)
    # Zone id comes from the hostname's trailing character (e.g. "zone3" -> "3").
    zone_number = str(socket.gethostname())[-1]
    # NOTE(review): os.system with a formatted string -- the values here come
    # only from the local clock/hostname, but never format untrusted input
    # into this command.
    cmd = "raspistill -o ~/Desktop/camera%d/z%sc%d--%s--%s.png" % (cam, zone_number, cam, date, time)
    os.system(cmd)
if __name__ == "__main__":
    main()
    # Restore the pin pattern used for camera 1 before exiting -- presumably
    # the intended idle state; confirm against the adapter docs.
    gp.output(7, False)
    gp.output(11, False)
    gp.output(12, True)
ghaughian/kdb-doc-manager | tests/test_kdb_doc_manager.py | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import time
from mongo_connector.command_helper import CommandHelper
from mongo_connector.doc_managers.kdb_doc_manager import DocManager
from mongo_connector.test_utils import TESTARGS
sys.path[0:0] = [""]
from tests import unittest, kdb_url
from tests.test_kdb import KdbTestCase
class TestKdbDocManager(KdbTestCase):
    """Unit tests for KdbDocManager: upsert/update/remove/search against a
    live kdb+ instance (connection comes from KdbTestCase).
    """
    def setUp(self):
        """Empty KDB at the start of every test
        """
        self._remove()
    def test_update(self):
        doc_id = '1'
        doc = {"id": doc_id, "title": "abc", "description": "def"}
        self.docman.upsert(doc, *TESTARGS)
        # $set only
        update_spec = {"$set": {"title": "qaz", "description": "wsx"}}
        doc = self.docman.update(doc_id, update_spec, *TESTARGS)
        expected = {"id": doc_id, "title": "qaz", "description": "wsx"}
        for k, v in expected.items():
            self.assertEqual(doc[k], v)
        # $unset only
        update_spec = {"$unset": {"title": True}}
        doc = self.docman.update(doc_id, update_spec, *TESTARGS)
        expected = {"id": '1', "description": "wsx"}
        for k, v in expected.items():
            self.assertEqual(doc[k], v)
        self.assertNotIn("title", doc)
        # mixed $set/$unset
        update_spec = {"$unset": {"description": True},
                       "$set": {"subject": "edc"}}
        doc = self.docman.update(doc_id, update_spec, *TESTARGS)
        expected = {"id": '1', "subject": "edc"}
        for k, v in expected.items():
            self.assertEqual(doc[k], v)
        self.assertNotIn("description", doc)
    def test_replacement_unique_key(self):
        docman = DocManager(kdb_url, unique_key='id')
        # Document coming from kdb. 'id' is the unique key.
        from_kdb = {'id': 1, 'title': 'unique key replacement test'}
        # Replacement coming from an oplog entry in MongoDB. Still has '_id'.
        replacement = {'id': 1, 'title': 'unique key replaced!'}
        replaced = docman.apply_update(from_kdb, replacement)
        self.assertEqual('unique key replaced!', replaced['title'])
    def test_upsert(self):
        """Ensure we can properly insert into Kdb via DocManager.
        """
        #test upsert
        docc = {'id': '1', 'name': 'John'}
        self.docman.upsert(docc, *TESTARGS)
        res = self.kdb_conn.sync('select from .test.test')
        for doc in res:
            self.assertTrue(doc is not None)
            self.assertTrue(doc['id'] == '1' and doc['name'] == 'John')
        # Second upsert with the same id must replace, not duplicate.
        docc = {'id': '1', 'name': 'Paul'}
        self.docman.upsert(docc, *TESTARGS)
        res = self.kdb_conn.sync('select from .test.test')
        for doc in res:
            self.assertTrue(doc['id'] == '1' and doc['name'] == 'Paul')
    def test_bulk_upsert(self):
        """Ensure we can properly insert many documents at once into
        Kdb via DocManager
        """
        docs = ({"id": i} for i in range(1, 1001))
        self.docman.bulk_upsert(docs, *TESTARGS)
        # NOTE(review): ids are generated from 1..1000 but the enumerate
        # comparison below starts at 0 (r == i) -- confirm this test actually
        # passes against a live kdb+; it looks off by one.
        res = sorted(int(x["id"])
                     for x in self.kdb_conn.sync('?[.test.test;enlist(<;`i;1001);0b;()]'))
        self.assertEqual(len(res), 1000)
        for i, r in enumerate(res):
            self.assertEqual(r, i)
        docs = ({"id": i, "weight": 2*i} for i in range(1,1001))
        self.docman.bulk_upsert(docs, *TESTARGS)
        res = sorted(int(x["weight"])
                     for x in self.kdb_conn.sync('?[.test.test;enlist(<;`i;1001);0b;()]'))
        self.assertEqual(len(res), 1000)
        for i, r in enumerate(res):
            self.assertEqual(r, 2*i)
    def test_remove(self):
        """Ensure we can properly delete from kdb via DocManager.
        """
        #test remove
        docc = {'id': '1', 'name': 'John'}
        self.docman.upsert(docc, *TESTARGS)
        res = self.kdb_conn.sync('select from .test.test')
        self.assertEqual(len(res), 1)
        self.docman.remove(docc['id'], *TESTARGS)
        res = self.kdb_conn.sync('select from .test.test')
        self.assertEqual(len(res), 0)
    def test_search(self):
        """Query KDB for docs in a timestamp range.
        We use API and DocManager's search(start_ts,end_ts), and then compare.
        """
        #test search
        docc = {'id': '1', 'name': 'John'}
        self.docman.upsert(docc, 'test.test', 5767301236327972865)
        docc = {'id': '2', 'name': '<NAME>'}
        self.docman.upsert(docc, 'test.test', 5767301236327972866)
        docc = {'id': '3', 'name': 'Paul'}
        self.docman.upsert(docc, 'test.test', 5767301236327972870)
        # The third doc's timestamp falls outside the searched range.
        search = list(self.docman.search(5767301236327972865,
                                         5767301236327972866))
        self.assertEqual(2, len(search),
                         'Should find two documents in timestamp range.')
        result_names = [result.get("name") for result in search]
        self.assertIn('John', result_names)
        self.assertIn('<NAME>', result_names)
    def test_get_last_doc(self):
        """Insert documents, Verify the doc with the latest timestamp.
        """
        #test get last doc
        docc = {'id': '4', 'name': 'Hare'}
        self.docman.upsert(docc, 'test.test', 2)
        docc = {'id': '5', 'name': 'Tortoise'}
        self.docman.upsert(docc, 'test.test', 1)
        doc = self.docman.get_last_doc()
        self.assertTrue(doc is not None)
        self.assertTrue(doc['id'] == '4')
        # NOTE(review): docc '6' is built but never upserted, so the final
        # assertion can only ever see id '4' -- was an upsert call dropped?
        docc = {'id': '6', 'name': 'HareTwin', 'ts': '2'}
        doc = self.docman.get_last_doc()
        self.assertTrue(doc['id'] == '4' or doc['id'] == '6')
    def test_commands(self):
        self.docman.command_helper = CommandHelper()
        # Count the rows of a kdb namespace (e.g. "test.test" -> .test.test).
        def count_ns(ns):
            return sum([self.kdb_conn.sync("count .{}".format(ns))])
        self.docman.upsert({'id': '1', 'test': 'data'}, *TESTARGS)
        self.assertEqual(count_ns("test.test"), 1)
        self.docman.handle_command({'drop': 'test'}, *TESTARGS)
        time.sleep(1)
        self.assertEqual(count_ns("test.test"), 0)
        self.docman.upsert({'id': '2', 'test': 'data'}, 'test.test2', '2')
        self.docman.upsert({'id': '3', 'test': 'data'}, 'test.test3', '3')
        self.docman.handle_command({'dropDatabase': 1}, 'test.$cmd', 1)
        time.sleep(1)
        self.assertEqual(count_ns("test.test2"), 0)
        self.assertEqual(count_ns("test.test3"), 0)
if __name__ == '__main__':
unittest.main()
|
ghaughian/kdb-doc-manager | tests/test_kdb.py | # -*- encoding: utf-8 -*-
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test KDB using the synchronizer, i.e. as it would be used by an user
"""
import logging
import os
import sys
import time
from bson import SON
from mongo_connector.compat import u
from mongo_connector.connector import Connector
from mongo_connector.test_utils import ReplicaSet, assert_soon
from mongo_connector.util import retry_until_ok
from qpython import qconnection
from qpython.qtype import QException
sys.path[0:0] = [""]
from mongo_connector.doc_managers.kdb_doc_manager import DocManager
from tests import unittest, kdb_url
class KdbTestCase(unittest.TestCase):
    """Shared fixture: opens one q connection plus a DocManager for the whole
    class, and (re)creates the .test.test schema around every test."""
    @classmethod
    def setUpClass(cls):
        # kdb_url is "host:port"; the connection stays open for the class.
        cls.kdb_conn = qconnection.QConnection(host=kdb_url.split(':')[0], port=int(kdb_url.split(':')[1]))
        cls.kdb_conn.open()
        cls.docman = DocManager(kdb_url, unique_key='id')
    def setUp(self):
        # Create test database in KDB+ (empty .test.test table with every
        # column the tests below rely on).
        self.kdb_conn.sync("`.test.test set ([]id:(); ts:`long$(); ns:(); name:(); title:(); description:(); subject:(); data:(); a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$(); b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$(); billing_address_street:(); billing_address_state:(); numbers_0:(); numbers_1:(); numbers_2:(); characters_0_name:(); characters_0_color:(); characters_1_name:(); characters_1_color:(); characters_2:(); popularity:`int$());")
    def _search(self, query):
        # Thin wrapper over the DocManager's streaming search.
        return self.docman._stream_search(query)
    def _remove(self):
        # Drop and recreate the table so each test starts from a clean slate.
        self.kdb_conn.sync("![`.test;();0b;enlist`test];`.test.test set ([]id:(); ts:`long$(); ns:(); name:(); title:(); description:(); subject:(); data:(); a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$(); b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$(); billing_address_street:(); billing_address_state:(); numbers_0:(); numbers_1:(); numbers_2:(); characters_0_name:(); characters_0_color:(); characters_1_name:(); characters_1_color:(); characters_2:(); popularity:`int$());")
class TestKdb(KdbTestCase):
    """End-to-end tests: a MongoDB replica set replicated into kdb+ through
    mongo-connector, exercised as a user would.
    """
    @classmethod
    def setUpClass(cls):
        KdbTestCase.setUpClass()
        cls.repl_set = ReplicaSet().start()
        cls.conn = cls.repl_set.client()
    @classmethod
    def tearDownClass(cls):
        """ Kills cluster instance
        """
        cls.repl_set.stop()
    def setUp(self):
        self._remove()
        # Start each test from a fresh oplog progress file so the connector
        # replays from scratch.
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()
        docman = DocManager(kdb_url, unique_key='id')
        self.connector = Connector(
            mongo_address=self.repl_set.uri,
            ns_set=['test.test'],
            doc_managers=(docman,),
        )
        retry_until_ok(self.conn.test.test.drop)
        self._remove()
        self.connector.start()
        assert_soon(lambda: len(self.connector.shard_set) > 0)
    def tearDown(self):
        self.connector.join()
    def test_insert(self):
        """Tests insert
        """
        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync('?[.test.test;();0b;()]')) > 0)
        result_set_1 = self.kdb_conn.sync('?[.test.test;enlist(~\:;`name;"paulie");0b;()]')
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            doc = {}
            for k, v in item.items():
                doc[k] = v
            self.assertEqual(doc['id'], str(result_set_2['_id']))
            self.assertEqual(doc['name'], result_set_2['name'])
    def test_remove(self):
        """Tests remove
        """
        self.conn['test']['test'].insert_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[.test.test;();0b;()]")) == 1)
        self.conn['test']['test'].delete_one({'name': 'paulie'})
        assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[.test.test;();0b;()]")) == 0)
    def test_update(self):
        """Test update operations on Kdb.
        Need to have the following fields defined in schema.q:
            a:`int$(); b_0_c:`int$(); b_10_c:`int$(); b_0_e:`int$();
            b_1_d:`int$(); b_1_f:`int$(); b_2_e:`int$()
        """
        docman = self.connector.doc_managers[0]
        self.conn.test.test.insert_one({"a": 0})
        assert_soon(lambda: sum(1 for _ in self._search("()")) == 1)
        # Apply *update_spec* in MongoDB and wait until the flattened result
        # is replicated to kdb+ verbatim.
        def check_update(update_spec):
            updated = self.conn.test.command(
                SON([('findAndModify', 'test'),
                     ('query', {"a": 0}),
                     ('update', update_spec),
                     ('new', True)]))['value']
            # Stringify _id to match what will be retrieved from Kdb
            updated[u('_id')] = u(updated['_id'])
            # Flatten the MongoDB document to match Kdb
            updated = docman._clean_doc(updated, 'dummy.namespace', 0)
            def update_worked():
                replicated = list(self._search("ns:test.test;a=0"))[0]
                return replicated == updated
            # Allow some time for update to propagate
            assert_soon(update_worked)
        # Update by adding a field.
        # Note that Kdb can't mix types within an array
        check_update({"$set": {"b": [{"c": 10}, {"d": 11}]}})
        # Update by setting an attribute of a sub-document beyond end of array.
        check_update({"$set": {"b.10.c": 42}})
        # Update by changing a value within a sub-document (contains array)
        check_update({"$inc": {"b.0.c": 1}})
        # Update by changing the value within an array
        check_update({"$inc": {"b.1.f": 12}})
        # Update by adding new bucket to list
        check_update({"$push": {"b": {"e": 12}}})
        # Update by replacing an entire sub-document
        check_update({"$set": {"b.0": {"e": 4}}})
        # Update by adding a sub-document
        check_update({"$set": {"b": {"0": {"c": 100}}}})
        # Update whole document
        check_update({"a": 0, "b": {"1": {"d": 10000}}})
    def test_rollback(self):
        """Tests rollback. We force a rollback by inserting one doc, killing
        primary, adding another doc, killing the new primary, and
        restarting both the servers.
        """
        primary_conn = self.repl_set.primary.client()
        self.conn['test']['test'].insert_one({'name': 'paul'})
        assert_soon(
            lambda: self.conn.test.test.find({'name': 'paul'}).count() == 1)
        assert_soon(
            lambda: sum(1 for _ in self.kdb_conn.sync('?[`.test.test;();0b;()]')) == 1)
        # Kill the primary; the secondary takes over.
        self.repl_set.primary.stop(destroy=False)
        new_primary_conn = self.repl_set.secondary.client()
        admin_db = new_primary_conn['admin']
        while admin_db.command("isMaster")['ismaster'] is False:
            time.sleep(1)
        time.sleep(5)
        # This write lands only on the new primary and will be rolled back.
        retry_until_ok(self.conn.test.test.insert_one, {'name': 'pauline'})
        assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync('?[`.test.test;();0b;()]')) == 2)
        result_set_1 = list(self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"pauline");0b;()]'))
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
        # Kill the new primary and restart both nodes; the original primary
        # wins and 'pauline' must be rolled back out of kdb+.
        self.repl_set.secondary.stop(destroy=False)
        self.repl_set.primary.start()
        while primary_conn['admin'].command("isMaster")['ismaster'] is False:
            time.sleep(1)
        self.repl_set.secondary.start()
        time.sleep(2)
        result_set_1 = self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"pauline");0b;()]')
        self.assertEqual(sum(1 for _ in result_set_1), 0)
        result_set_2 = self.kdb_conn.sync('?[`.test.test;enlist(~\:;`name;"paul");0b;()]')
        self.assertEqual(sum(1 for _ in result_set_2), 1)
    def test_valid_fields(self):
        """ Tests documents with field definitions
        """
        inserted_obj = self.conn['test']['test'].insert_one(
            {'name': 'test_valid'}).inserted_id
        self.conn['test']['test'].update_one(
            {'_id': inserted_obj},
            {'$set': {'popularity': 1}}
        )
        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in self._search("()")) > 0)
        result = docman.get_last_doc()
        self.assertIn('popularity', result)
        self.assertEqual(sum(1 for _ in self._search("name ~\:\"test_valid\"")), 1)
    def test_invalid_fields(self):
        """ Tests documents without field definitions
        """
        inserted_obj = self.conn['test']['test'].insert_one(
            {'name': 'test_invalid'}).inserted_id
        self.conn['test']['test'].update_one(
            {'_id': inserted_obj},
            {'$set': {'break_this_test': 1}}
        )
        docman = self.connector.doc_managers[0]
        assert_soon(lambda: sum(1 for _ in self._search("()")) > 0)
        result = docman.get_last_doc()
        # Fields without a schema definition must be dropped, not replicated.
        self.assertNotIn('break_this_test', result)
        self.assertEqual(sum(1 for _ in self._search(
            "name ~\:\"test_invalid\"")), 1)
    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB
        The following fields are defined in the provided schema.q:
            billing_address_street:(); billing_address_state:();
            numbers_0:(); numbers_1:(): numbers_2:()
            characters_0_name:(); characters_0_color:();
            characters_1_name:(); characters_1_color:();
            characters_2:()
        """
        # Connector is already running
        self.conn["test"]["test"].insert_one({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert_one({
            "numbers": ["one", "two", "three"],
            "characters": [
                {"name": "<NAME>",
                 "color": "yellow"},
                {"name": "Elmo",
                 "color": "red"},
                "Cookie Monster"
            ]
        })
        assert_soon(lambda: sum(1 for _ in self.kdb_conn.sync("?[`.test.test;();0b;()]")) > 0,
                    "documents should have been replicated to Kdb")
        # Search for first document
        results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`billing_address_street;\"12345 Mariposa Street\");0b;()]")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["billing_address_state"],
                         "California")
        # Search for second document
        results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`characters_1_color;\"red\");0b;()]")
        self.assertEqual(len(results), 1)
        # NOTE(review): every other flattened key uses underscores
        # (numbers_2, characters_2) while this lookup uses "numbers.2" --
        # confirm which form the doc manager actually emits.
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.kdb_conn.sync("?[`.test.test;enlist(~\:;`characters_2;\"Cookie Monster\");0b;()]")
        self.assertEqual(len(results), 1)
if __name__ == '__main__':
unittest.main()
|
ghaughian/kdb-doc-manager | mongo_connector/doc_managers/kdb_doc_manager.py | # Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for Kdb+.
"""
import json
import logging
import os
import re
from itertools import izip
from qpython import qconnection
from qpython.qcollection import qlist, QDictionary
from qpython.qtype import *
from mongo_connector import errors
from mongo_connector.compat import u
from mongo_connector.constants import DEFAULT_MAX_BULK
from mongo_connector.compat import (
Request, urlopen, urlencode, URLError, HTTPError)
from mongo_connector.util import exception_wrapper, retry_until_ok
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
from mongo_connector.doc_managers.formatters import DocumentFlattener
# Map driver-level failures onto mongo-connector's error hierarchy so the
# connector's retry machinery can treat them uniformly.
wrap_exceptions = exception_wrapper({
    QException: errors.OperationFailed,
    URLError: errors.ConnectionFailed,
    HTTPError: errors.ConnectionFailed
})
# Shared, stateless JSON decoder used across the module.
decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
    def __init__(self, url, unique_key='id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
        """Verify KDB URL and establish a connection.

        *url* is "host:port"; the q connection is opened immediately and
        kept for the manager's lifetime.
        """
        self.url = url
        self.q = qconnection.QConnection(host=url.split(':')[0], port=int(url.split(':')[1]))
        self.q.open()
        self.unique_key = unique_key
        self.chunk_size = chunk_size
        # field_list / field_conversion: presumably populated per-namespace
        # elsewhere in this class (field_conversion feeds _convert_types) --
        # confirm against the rest of the file.
        self.field_list = {}
        self.field_conversion = {}
        # Flattens nested documents into dotted/underscored key paths.
        self._formatter = DocumentFlattener()
def _convert_types(self, field_conversion, field, value):
if not field_conversion.has_key(field):
return value.encode('utf-8',errors='ignore')
elif 'b' == field_conversion[field]:
return numpy.bool(value)
elif 'x' == field_conversion[field]:
return numpy.byte(value)
elif 'h' == field_conversion[field]:
return numpy.int16(value)
elif 'i' == field_conversion[field]:
return numpy.int32(value)
elif 'j' == field_conversion[field]:
return numpy.int64(value)
elif 'e' == field_conversion[field]:
return numpy.float32(value)
elif 'f' == field_conversion[field]:
return numpy.float64(value)
elif 's' == field_conversion[field]:
return numpy.string_(value)
elif field_conversion[field] in 'p':
return numpy.datetime64[ns](value)
elif field_conversion[field] in 'm':
return numpy.datetime64[M](value)
elif field_conversion[field] in 'd':
return numpy.datetime64[D](value)
elif field_conversion[field] in 'z':
return numpy.datetime64[ms](value)
elif field_conversion[field] in 'n':
return numpy.timedelta64[ns](value)
elif field_conversion[field] in 'u':
return numpy.timedelta64[m](value)
elif field_conversion[field] in 'v':
return numpy.timedelta64[s](value)
elif field_conversion[field] in 't':
return numpy.timedelta64[ms](value)
else:
return value.encode('utf-8',errors='ignore')
def _clean_doc(self, doc, namespace, timestamp):
"""Reformats the given document before insertion into KDB.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
- inserts namespace and timestamp metadata into the document in order
to handle rollbacks
An example:
{"a": 2,
"b": { "c": { "d": 5 } },
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b_c_d": 5, "e_0": 6, "e_1": 7, "e_2": 8}
"""
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from KDB
# as part of update.
if '_id' in doc:
doc[self.unique_key] = u(doc.pop("_id"))
# Update namespace and timestamp metadata
if 'ns' in doc or 'ts' in doc:
raise errors.OperationFailed(
'Need to set "ns" and "ts" fields, but these fields already '
'exist in the document %r!' % doc)
doc['ns'] = namespace.encode('UTF8')
doc['ts'] = u(timestamp)
# KDB cannot store fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
if not self.field_list.has_key(namespace):
self.field_list[namespace] = list(u(f) for f in self.q.sync('exec c from meta .{}'.format(namespace)))
self.field_conversion[namespace] = dict(zip(list(self.q.sync('exec c from meta .{} where not t in (" cC")'.format(namespace))), list(self.q.sync('exec t from meta .{} where not t in (" cC")'.format(namespace)))))
# Only include fields that are explicitly provided in the schema
field_list = self.field_list[namespace]
field_conversion = self.field_conversion[namespace]
if len(field_list) > 0:
flat_doc = dict((k.replace('.','_',10), self._convert_types(field_conversion,k,v)) for k, v in flat_doc.items() if k in field_list)
print("#### flattened doc looks like: {}".format(flat_doc))
return flat_doc
return flat_doc
def stop(self):
""" Stops the instance
"""
pass
@wrap_exceptions
def handle_command(self, doc, namespace, timestamp):
db, _ = namespace.split('.', 1)
if doc.get('dropDatabase'):
for new_db in self.command_helper.map_db(db):
self.q.sync('![`.{0};();0b;]each enlist each tables[`.{0}]'.format(new_db))
if doc.get('renameCollection'):
raise errors.OperationFailed(
"kdb_doc_manager does not support replication of "
" renameCollection")
if doc.get('create'):
# nothing to do
pass
if doc.get('drop'):
new_db, coll = self.command_helper.map_collection(db, doc['drop'])
if new_db:
self.q.sync('![`.{0};();0b;enlist `{1}]'.format(new_db,coll))
def apply_update(self, doc, update_spec, namespace=''):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
if not '$set' in update_spec and not '$unset' in update_spec:
# update_spec contains the new document.
# Update the key in kdb based on the unique_key mentioned as
# parameter.
update_spec['_id'] = doc[self.unique_key]
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
field_list = self.field_list[namespace]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
if to_set in field_list:
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc
@wrap_exceptions
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
# first select the row that matchs and drop the `ns and `ts columns
results = self.q.sync('first ![;();0b;`ns`ts]?[`.{0};enlist(~\:;`{1};"{2}");0b;();1]'.format(namespace, self.unique_key, u(document_id)))
doc = {}
if isinstance(results, QDictionary):
for k, v in results.items():
doc[k]=v
# Results should be a KDB dict containing only 1 result
#for row in results:
print("######## row to update: {}".format(doc))
updated = self.apply_update(doc, update_spec, namespace)
self.upsert(updated, namespace, timestamp)
return updated
@wrap_exceptions
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into KDB
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
doc = self._clean_doc(doc, namespace, timestamp)
self.q.sync('insert', numpy.string_('.{}'.format(namespace)), QDictionary(qlist([numpy.string_(x) for x in doc.keys()],qtype=QSYMBOL_LIST), doc.values()))
@wrap_exceptions
def bulk_upsert(self, docs, namespace, timestamp):
"""Update or insert multiple documents into KDB
docs may be any iterable
"""
if not docs:
print("########## NO DOCS!!!! #########")
return
cleaned = (self._clean_doc(d, namespace, timestamp) for d in docs)
if self.chunk_size > 0:
batch = list(next(cleaned) for i in range(self.chunk_size))
while batch:
print('#### in the bulk_upsert_1 function ####')
for rec in batch:
print(rec)
self.q.sync('insert', numpy.string_('.{}'.format(namespace)), QDictionary(qlist([numpy.string_(x) for x in rec.keys()],qtype=QSYMBOL_LIST), rec.values()))
#self.q.sync('insert', numpy.string_('.{}'.format(namespace)), QDictionary(qlist([numpy.string_(x) for x in doc.keys()],qtype=QSYMBOL_LIST), doc.values()))
#self.q.sync('insert', '.{}'.format(namespace), QDictionary(qlist(rec.keys(),qtype=QGENERAL_LIST), qlist([x.encode('utf-8') for x in rec.values()],qtype=QGENERAL_LIST)))
batch = list(next(cleaned)
for i in range(self.chunk_size))
else:
#self.q.sync('insert', namespace, QDictionary(qlist(cleaned.keys(),qtype=QSYMBOL_LIST), cleaned.values()))
print('#### in the bulk_upsert_2 function ####')
self.q.sync('insert', '.{}'.format(namespace), cleaned)
@wrap_exceptions
def insert_file(self, f, namespace, timestamp):
raise errors.OperationFailed("kdb_doc_manager does not support replication of insert_file")
@wrap_exceptions
def remove(self, document_id, namespace, timestamp):
"""Removes documents from KDB+
The input is a python dictionary that represents a mongo document.
"""
self.q.sync('![`.{0};enlist(~\:;`{1};\"{2}\");0b;`symbol$()]'.format(namespace, self.unique_key, u(document_id)))
return 0
def _ns_and_qry(self, query):
"""Helper method for getting the kdb database name to apply query to.
e.g. query = "ns:test.nest;a=0"
"""
ns, qry = query.split('ns:',1)[1].split(';', 1)
if qry is None:
qry='()'
return ns.lower(), qry
def _get_tables(self):
return self.q.sync('raze {`$(string[x],"."),/:string tables[x]} each `$".",/:string except[key`;`q`Q`h`j`o]')
@wrap_exceptions
def _stream_search(self, query):
"""Helper method for iterating over KDB+ results."""
kdb_query=''
if query.startswith('ns:'):
ns, qry = self._ns_and_qry(query)
if '()' == qry:
kdb_query = '?[`.{0};();0b;();1000000]'.format(ns)
else:
kdb_query = '?[`.{0};enlist parse"{1}";0b;();1000000]'.format(ns, qry)
else:
tabs = self._get_tables()
if len(tabs) == 1:
tab = 'enlist {}'.format(tabs[0])
else:
tab = '({})'.format(";".join(tabs))
if '()' == query:
kdb_query = 'raze ?[;();0b;();1000000] each {0}'.format(,tab)
else:
kdb_query = 'raze ?[;enlist parse"{0}";0b;();1000000] each {1}'.format(query,tab)
for doc in self.q.sync(kdb_query):
subdoc = {}
if isinstance(doc, QDictionary):
for k, v in doc.items():
subdoc[k]=v
if self.unique_key != "_id":
subdoc["_id"] = subdoc.pop(self.unique_key)
yield subdoc
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query KDB for documents in a time range."""
query = 'ts within ({0};{1})'.format(start_ts, end_ts)
return self._stream_search(query)
@wrap_exceptions
def commit(self):
pass
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in the KDB engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
tabs = self._get_tables()
if len(tabs) == 1:
tab = 'enlist {}'.format(tabs[0])
else:
tab = '({})'.format(";".join(tabs))
#result = self.q.sync('select max[ts] from raze ?[;enlist(=;`ts;(max;`ts));0b;()] each ({})'.format(";".join(tabs)))
result = self.q.sync('?[;enlist(=;`ts;(max;`ts));0b;()] uj/[?[;enlist(=;`ts;(max;`ts));0b;()] each {}]'.format(tab))
except ValueError:
return None
for r in result:
doc = {}
if isinstance(r, QDictionary):
for k, v in r.items():
doc[k]=v
doc['_id'] = doc.pop(self.unique_key)
return doc
|
sdesimone/ML4iOS | ML4iOSTests/data/pythonTests.py | <filename>ML4iOSTests/data/pythonTests.py
#!/usr/bin/env python
# Ad-hoc exercise script for the BigML Python bindings, used to produce
# reference predictions for the ML4iOS test suite.  The commented blocks
# below are previous experiments (clusters, ensembles, anomaly detectors)
# kept for reference; only the logistic-regression prediction is live.
from bigml.api import BigML
from bigml.model import Model
from bigml.ensemble import Ensemble
from bigml.anomaly import Anomaly
from bigml.cluster import Cluster
from bigml.logistic import LogisticRegression
# Requires BigML credentials in the environment (BIGML_USERNAME/BIGML_API_KEY).
api = BigML(dev_mode=False)
#clusterJson = api.get_cluster('cluster/5644d1ad636e1c79b00037b9')
#cluster = Cluster('cluster/5644d1ad636e1c79b00037b9', api=api)
#prediction = cluster.centroid({'petal length': 4.07, 'sepal width': 3.15, 'petal width': 1.51, 'sepal length' : 4.0, 'species' : 'Iris-setosa'}, by_name=True)
#cluster = Cluster('cluster/565f088fce165e0a1401783c', api=api)
#prediction = cluster.centroid({'Team': "Atlanta Braves", 'Salary': 30000, 'Position': "Pitcher"}, by_name=True)
#prediction = cluster.centroid({'Team': "Atlanta Braves", 'Salary': 30000000000, 'Position': "Shortstop"}, by_name=True)
# api.pprint(prediction)
# prediction = cluster.centroid({'petal length': 2.07, 'sepal width': 4.8, 'petal width': 2.51, 'sepal length' : 8.2, 'species' : 'iris-setosa'}, by_name=True)
# model = api.get_model('model/563a1c7a3cd25747430023ce')
# prediction = api.create_prediction(model, {'petal length': 4.07, 'sepal width': 3.15, 'petal width': 1.51})
# local_model = Model('model/56430eb8636e1c79b0001f90', api=api)
# prediction = local_model.predict({'petal length': 0.96, 'sepal width': 4.1, 'petal width': 2.52}, 2, add_confidence=True, multiple=3)
#local_model = Ensemble('ensemble/563219b8636e1c5eca006d38', api=api)
# local_model = Ensemble('ensemble/564a081bc6c19b6cf3011c60', api=api)
#prediction = local_model.predict({'petal length': 0.96, 'sepal width': 2.25, 'petal width': 1.51, 'sepal length': 6.02}, method=2, add_confidence=True)
#local_model = Ensemble('ensemble/5666fb621d55051209009f0f', api=api)
#prediction = local_model.predict({'Salary': 18000000, 'Team' : 'Atlanta Braves'}, method=0, add_confidence=True)
#local_model = Ensemble('ensemble/566954af1d5505120900bf69', api=api)
#prediction = local_model.predict({'Price' : 5.8, 'Grape' : 'Pinot Grigio', 'Rating' : 89, 'Country' : 'Italy'}, method=1, add_confidence=True, add_distribution=True)
# local_ensemble = Ensemble('ensemble/564623d4636e1c79b00051f7', api=api)
# prediction = local_ensemble.predict({'Price' : 5.8, 'Grape' : 'Pinot Grigio', 'Country' : 'Italy', 'Rating' : 92}, True)
# local_anomaly = Anomaly('anomaly/564c5a76636e1c3d52000007', api=api)
# prediction = local_anomaly.anomaly_score({'petal length': 4.07, 'sepal width': 3.15, 'petal width': 1.51, 'sepal length': 6.02, 'species': 'Iris-setosa'}, True)
# prediction = local_anomaly.anomaly_score({'petal length': 0.96, 'sepal width': 4.1, 'petal width': 2.51, 'sepal length': 6.02, 'species': 'Iris-setosa'}, True)
# prediction = local_anomaly.anomaly_score({'petal length': 0.96, 'sepal width': 4.1, 'petal width': 2.51}, True)
# Fetch the remote logistic regression model and print a single prediction
# for a hand-picked iris sample.
logistic_regression = LogisticRegression(
    'logisticregression/5697c1179ed2334090003217')
prediction = logistic_regression.predict({"petal length": 4.07, "petal width": 14.07,
                                          "sepal length": 6.02, "sepal width": 3.15})
api.pprint(prediction)
|
osaukh/pollenpub | pollen_library/plot_pollen_sample.py | <filename>pollen_library/plot_pollen_sample.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 12:26:07 2020
@author: khoanam
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import os
from collections import Counter
import cv2
#%% Helper functions
def load_image(path):
    """Load the image at *path* and return it as an RGB numpy array.

    Returns None (after printing the offending path) when the file cannot
    be read.
    """
    img = cv2.imread(path, 1)
    # OpenCV loads images with color channels in BGR order, so reverse the
    # last axis to get RGB.  cv2.imread returns None on unreadable files,
    # which makes the slice below raise TypeError — catch only that instead
    # of the original bare except, so real bugs still surface.
    try:
        return img[..., ::-1]
    except TypeError:
        print('Error', path)
        print('**********************************')
        return None
def pad_n_resize(im, desired_size):
    """Pad *im* with black pixels to a square canvas, then resize it to
    (desired_size, desired_size) using cubic interpolation.

    im: HxWx3 numpy image; returns a desired_size x desired_size x 3 image.
    """
    def create_blank(width, height, rgb_color=(0, 0, 0)):
        """Create new image (numpy array) filled with certain color in RGB."""
        image = np.zeros((height, width, 3), np.uint8)
        # Fill image with color.  (A dead BGR-conversion of rgb_color was
        # removed here — the fill always used the RGB tuple directly.)
        image[:] = rgb_color
        return image

    square_size = max(im.shape)
    new_img = create_blank(square_size, square_size)
    # Centre the original image inside the square canvas.
    left = (square_size - im.shape[1]) // 2
    top = (square_size - im.shape[0]) // 2
    new_img[top: top + im.shape[0], left:left + im.shape[1], :] = im
    new_img = cv2.resize(new_img, (desired_size, desired_size), interpolation=cv2.INTER_CUBIC)
    return new_img
class IdentityMetadata():
    """Describes one labelled image on disk: <base>/<name>/<file>."""

    def __init__(self, base, name, file):
        # Dataset base directory, identity (class) name and image file name.
        self.base = base
        self.name = name
        self.file = file
        # Feature embedding; populated later by the embedding pipeline.
        self.embedding = None

    def __repr__(self):
        # Represent the record by the path it points at.
        return self.image_path()

    def image_path(self):
        """Return the full path of the image file."""
        return os.path.join(self.base, self.name, self.file)
#**********************************************************************************
def load_metadata(path):
    """Scan *path* (one sub-directory per identity) and return a numpy array
    of IdentityMetadata entries, one per jpg/jpeg image found."""
    entries = []
    for identity in sorted(os.listdir(path)):
        identity_dir = os.path.join(path, identity)
        for filename in sorted(os.listdir(identity_dir)):
            # Check file extension. Allow only jpg/jpeg files.
            extension = os.path.splitext(filename)[1]
            if extension in ('.jpg', '.jpeg'):
                entries.append(IdentityMetadata(path, identity, filename))
    return np.array(entries)
#%% Load data and plot
# Load the 16-class pollen image set and, for each class, show a 10x5 grid
# of randomly chosen, padded-and-resized sample images.
img_dir = 'images_16_types'
metadata = load_metadata(img_dir)
# One label per image, in metadata order.
targets = np.array([m.name for m in metadata])
classes = Counter(targets).keys()
classes = [*classes]
print(classes)
# Map each class name to the array of its image file names.
imgs_by_class = {}
for c in classes:
    file_idx = np.array([m.file for m in metadata if m.name == c])
    imgs_by_class[c] = file_idx
n_rows = 10
n_cols = 5
grid_size = n_rows* n_cols
for c in classes:
    imgs = []
    # Sample grid_size distinct images of this class (raises if the class
    # has fewer than grid_size images, since replace=False).
    random_list = np.random.choice(len(imgs_by_class[c]), size = grid_size, replace=False)
    for i in range(grid_size):
        img = load_image(os.path.join(img_dir, c, imgs_by_class[c][random_list[i]]))
        img = pad_n_resize(img, 96)
        imgs.append(img)
    fig = plt.figure(figsize=(30, 30))
    grid = ImageGrid(fig, 111, # similar to subplot(111)
                 nrows_ncols=(n_rows, n_cols), # creates 2x2 grid of axes
                 axes_pad=0.1, # pad between axes in inch.
                 )
    for ax, im in zip(grid, imgs):
        # Iterating over the grid returns the Axes.
        ax.imshow(im)
    plt.suptitle(f'{c}', fontsize = 80)
    # plt.title(c, fontsize = 24)
    plt.show()
osaukh/pollenpub | code/create_folds.py | '''Copyright (C) 2019 <NAME> of Technology (ETH Zurich), <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Changes to original version: Adopted to work with multi-layer pollen data
'''
import os
import pandas as pd
import argparse
import numpy as np
from os.path import join
from pathlib import Path
'''
Script generates a training config based on a directory containing two folders (*labels* and *layers*)
and one text file containing the classes (*classes.names*).
'''
# ---------------------------------------------------------------- CLI ----
parser = argparse.ArgumentParser()
parser.add_argument("--dir", '-f', type=str, default="data/pollen_deployment/test.txt", help="path to model definition file")
parser.add_argument("--name", '-n', type=str, default="pollen", help="base name of the dataset")
parser.add_argument("--output_dir", '-o', type=str, default="data/pollen_joint/", help="where to store the fold information")
parser.add_argument("--merge_set", '-m', type=str, default=None, help="optional set to be merged with training set")
parser.add_argument('-K', type=int, default=1, help="Number of folds to generate")
parser.add_argument("--stats_only", action='store_true', help="Output dataset statistics only")
args = parser.parse_args()

# Collect all layer images below <dir>/layers; the stack ("group") a layer
# belongs to is the file-name prefix before the first '-'.
p = Path(args.dir)
filenames = {'filename': [path.relative_to(p) for path in p.glob('layers/*')]}
df = pd.DataFrame(filenames)
df.sort_values('filename', inplace=True)
df['group'] = df['filename'].apply(lambda x: str(x).split('/')[-1].split('-')[0])
groups = df['group'].unique()
print("Number of layers \t", len(df))
print("Number of stacks \t", len(groups))
print("Average Number Layers per Stack \t", len(df)/len(groups))

# Count labelled pollen per layer from the parallel labels/*.txt files
# (one labelled pollen per line).  The original computed this only under
# --stats_only, but df['num_pollen'] is read unconditionally in the fold
# report below, which raised KeyError — so compute it always.
total_num_pollen = 0
layers_with_pollen = 0
layers_pollen = []
for filename in df['filename']:
    label_file = p/Path(str(filename).replace("layers", "labels").replace(".png", ".txt").replace(".jpg", ".txt"))
    if not label_file.exists():
        layers_pollen += [0]
        continue
    num_pollen = 0
    with open(label_file) as f:
        for line in f:  # count the number of lines == number of labeled pollen
            num_pollen += 1
    if num_pollen > 0:
        layers_with_pollen += 1
    layers_pollen += [num_pollen]
    total_num_pollen += num_pollen
df['num_pollen'] = layers_pollen
if args.stats_only:
    print('Total number of labeled pollen', total_num_pollen)
    print('Total number of layers with pollen', layers_with_pollen)

print(f"Average number of stacks per train \t {len(groups)/args.K * (args.K-1)}")
print(f"Average number of stacks per test \t {len(groups)/args.K}")

if args.merge_set:
    # NOTE(review): sep="\n" reads one filename per line; newer pandas
    # versions may reject a newline separator — verify against the pandas
    # version in use.
    merge_df = pd.read_csv(args.merge_set, sep="\n", header=None, names=['filename'])
    print(f'Loaded merge set with {len(merge_df)} items')
else:
    merge_df = None

# Split the stacks round-robin into K folds, so that all layers of one
# stack land in the same fold.
folds = []
for k in range(args.K):
    groups_fold = groups[k::args.K]
    fold_df = df[df['group'].apply(lambda x: x in groups_fold)]
    folds.append(fold_df)
    print(f"Fold {k}: Number of stacks {len(groups_fold)}")
    print(f"Fold {k}: Number of pollen {fold_df['num_pollen'].sum()}")
    print(f"Fold {k}: Layers with pollen {(fold_df['num_pollen']>0).sum()}")

# Write the train/test file lists and a darknet-style .data config per fold.
indices = np.arange(args.K).astype(int)  # np.int was removed from numpy
for k in range(args.K):
    if args.K == 1:
        base = ''
    else:
        base = f"fold{k}_"
    train_filename = join(args.output_dir, f'{base}train.txt')
    test_filename = join(args.output_dir, f'{base}test.txt')
    test_set = folds[k]
    print(f"Fold{k}: Images in test set \t {len(test_set)}")
    if not args.stats_only:
        test_set.to_csv(test_filename, columns=['filename'], index=False, header=False, sep='\n')
    if args.K > 1:
        train_folds = [folds[i] for i in indices[indices != k]]
        train_set = pd.concat(train_folds)
        if merge_df is not None:
            # DataFrame.append was removed in pandas 2.0; concat is the
            # supported equivalent.
            train_set = pd.concat([train_set, merge_df])
        print(f"Fold{k}: Images in train set \t {len(train_set)}")
        if not args.stats_only:
            train_set.to_csv(train_filename, columns=['filename'], index=False, header=False, sep='\n')
    if not args.stats_only:
        with open(f"config/{args.name}{base[:-1]}.data", 'w') as f:
            f.write('classes= 1\n')
            f.write('train=' + train_filename + '\n')
            f.write('valid=' + test_filename + '\n')
            f.write('base_dir=' + join(args.output_dir, '') + '\n')
            f.write('names=' + join(args.output_dir, 'classes.names') + '\n')
osaukh/pollenpub | code/test.py | <filename>code/test.py<gh_stars>1-10
'''Copyright (C) 2019 <NAME>, Swiss Federal Institute of Technology (ETH Zurich), <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Changes to original version: Adopted to work with multi-layer pollen data
'''
from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
import os
from shutil import copyfile
from pathlib import Path
import sys
import time
import datetime
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import Sampler
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
import matplotlib
print(matplotlib.get_backend())
matplotlib.use('agg')
import copy
import cv2
"""
Test a sample of microscope, consisting of multiple stacks.
"""
class GroupSampler(Sampler):
    r"""Yields one batch of dataset indices per stack ("group").

    All layers whose file name shares the prefix before the first '-'
    belong to the same stack and are emitted together in one batch, in
    lexicographic file-name order.

    Args:
        filenames (List): List of filenames of the dataset
    """

    def __init__(self, filenames):
        img_files_df = pd.DataFrame(filenames, columns=['filename'])
        img_files_df['index'] = np.arange(len(filenames))
        # The original called sort_values() without assigning the result,
        # silently leaving the frame unsorted; assign it so groups are
        # emitted in lexicographic order.
        img_files_df = img_files_df.sort_values('filename')
        img_files_df['group'] = img_files_df['filename'].apply(
            lambda x: os.path.splitext(os.path.basename(x))[0].split('-')[0])
        self.df = img_files_df
        self.groups = img_files_df['group'].unique()

    def __iter__(self):
        batch = []
        for idx in range(len(self.groups)):
            group = self.groups[idx]
            elements = self.df[self.df['group'] == group]
            for i in range(len(elements)):
                index = elements.iloc[i]['index']
                batch.append(index)
            yield batch
            batch = []

    def __len__(self):
        # One batch per stack.
        return len(self.groups)
def merge_bb(box1, box2, iou_threshold, x1y1x2y2=True):
    """Merge *box1* with the best-overlapping box in *box2*.

    Computes the IoU of box1 against every box in box2 (broadcast) and, if
    the best IoU reaches *iou_threshold*, returns the intersection
    rectangle [x1, y1, x2, y2] of that pair (as 0-dim tensors); otherwise
    returns None.  Set x1y1x2y2=False when boxes are (cx, cy, w, h).

    (The original docstring claimed the IoU was returned, and an
    unreachable `return iou` followed the None return — both fixed.)
    """
    if not x1y1x2y2:
        # Transform from center and width to exact coordinates
        b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    else:
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # Coordinates of the intersection rectangle(s)
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area, clamped to zero for disjoint boxes; the +1 treats
    # coordinates as inclusive pixel indices.
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
        inter_rect_y2 - inter_rect_y1 + 1, min=0
    )
    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
    # Keep only the candidate with the highest IoU against box1.
    iou, box_index = iou.max(0)
    if iou >= iou_threshold:
        return [inter_rect_x1[box_index], inter_rect_y1[box_index],
                inter_rect_x2[box_index], inter_rect_y2[box_index]]
    return None
def target_merge(targets,iou_threshold):
    """Collapse ground-truth boxes (rows of [image_idx, label, x1, y1, x2, y2])
    that overlap with IoU >= iou_threshold and share the same label.

    Overlapping same-label boxes are replaced in place by the rectangle
    merge_bb returns for the pair; boxes with no qualifying overlap are
    appended unchanged.  The image-index column is zeroed, i.e. all merged
    boxes are assigned to a single image.
    """
    if targets.shape[0] == 0:
        return targets
    # Seed the merged set with the first box, then fold in the rest.
    ts = targets[0].unsqueeze(0)
    targets = targets[1:]
    for i in range(len(targets)):
        length = len(ts)
        overlap_found = False
        for j in range(length):
            # merge_bb returns a merged rect when IoU >= threshold, else None.
            bbox = merge_bb(targets[i,2:].unsqueeze(0), ts[j,2:].unsqueeze(0),iou_threshold)
            label_0 = targets[i,1]
            label_1 = ts[j,1]
            if bbox is not None and label_0 == label_1:
                ts[j,2:6] = torch.Tensor(bbox) # merge
                overlap_found = True
        if not overlap_found:
            # we did not find an overlap, thus add this target to ts
            ts = torch.cat((ts,targets[i].unsqueeze(0)),0) # add additional bb
    ts[:,0] = 0
    return ts
def output_merge(outputs,iou_threshold):
    """Merge per-layer detections across a whole stack.

    Like target_merge, but for model outputs: same-label boxes whose IoU
    reaches iou_threshold are collapsed (keeping the larger confidence at
    column 4), others are appended.  Returns shape [1, n_merged, features]
    so the result looks like a single-image batch.
    """
    # output has shape [batch_size, number_predictions, features]
    # features are 0:4 boundingboxes, 4 score, -1 label
    targets = torch.cat(outputs) # [all_number_predictions, features]
    if targets.shape[0] == 0:
        return targets
    # Seed the merged set with the first detection, then fold in the rest.
    ts = targets[0].unsqueeze(0)
    targets = targets[1:]
    for i in range(len(targets)):
        length = len(ts)
        overlap_found = False
        for j in range(length):
            # merge_bb returns a merged rect when IoU >= threshold, else None.
            bbox = merge_bb(targets[i,:4].unsqueeze(0), ts[j,:4].unsqueeze(0),iou_threshold)
            label_0 = targets[i,-1]
            label_1 = ts[j,-1]
            if bbox is not None and label_0 == label_1:
                ts[j,:4] = torch.Tensor(bbox) # merge
                # Keep the maximum confidence of the merged detections.
                if ts[j,4] < targets[i,4]:
                    ts[j,4] = targets[i,4]
                overlap_found = True
        if not overlap_found:
            # we did not find an overlap, thus add this target to ts
            ts = torch.cat((ts,targets[i].unsqueeze(0)),0) # add additional bb
    return ts.unsqueeze(0)
def output_non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4, area_thres=None, min_detections=1):
    """
    Removes detections with lower object confidence score than 'conf_thres' and performs
    Non-Maximum Suppression to further filter detections.

    prediction: iterable of per-image tensors with columns
        (x1, y1, x2, y2, object_conf, class_scores...).
    area_thres: optional (min_area, max_area) pair; boxes whose pixel area
        falls outside the range are discarded before NMS.
    min_detections: a merged box is kept only if it absorbed at least this
        many overlapping raw detections.

    Returns detections with shape:
        (x1, y1, x2, y2, object_conf, class_score, class_pred)
    """
    if len(prediction) == 0:
        return prediction
    # From (center x, center y, width, height) to (x1, y1, x2, y2)
    # prediction[..., :4] = xywh2xyxy(prediction[..., :4])
    output = [None for _ in range(len(prediction))]
    for image_i, image_pred in enumerate(prediction):
        if image_pred is None:
            continue
        # Filter out confidence scores below threshold
        image_pred = image_pred[image_pred[:, 4] >= conf_thres]
        if area_thres is not None:
            # If none are remaining => process next image
            if not image_pred.size(0):
                continue
            # Discard boxes smaller than area_thres[0] or larger than
            # area_thres[1] (+1 treats coordinates as inclusive pixels).
            x1, y1, x2, y2 = image_pred[:, 0], image_pred[:, 1], image_pred[:, 2], image_pred[:, 3]
            image_area = (x2 - x1 + 1) * (y2 - y1 + 1)
            # image_area = image_area.abs()
            # if (image_area < area_thres[0]).sum() > 0:
            #     print(f'Discarded (too small) {(image_area < area_thres[0]).sum()} {image_area} {area_thres[0]}', )
            image_pred = image_pred[image_area >= area_thres[0]]
            image_area = image_area[image_area >= area_thres[0]]
            if not image_pred.size(0):
                continue
            image_pred = image_pred[image_area <= area_thres[1]]
        # If none are remaining => process next image
        if not image_pred.size(0):
            continue
        # Object confidence times class confidence
        score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
        # Sort by it
        image_pred = image_pred[(-score).argsort()]
        class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
        detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
        # Perform non-maximum suppression
        keep_boxes = []
        while detections.size(0):
            large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
            label_match = detections[0, -1] == detections[:, -1]
            # Indices of boxes with lower confidence scores, large IOUs and matching labels
            invalid = large_overlap & label_match
            weights = detections[invalid, 4:5]
            # Merge overlapping bboxes by order of confidence
            detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
            # print(invalid.sum(),invalid.shape)
            # Only keep the merged box if it came from enough raw detections.
            if invalid.sum() >= min_detections:
                keep_boxes += [detections[0]]
            detections = detections[~invalid]
        if keep_boxes:
            output[image_i] = torch.stack(keep_boxes)
    return output
def target_area_threshold(targets, area_thres):
    """Drop ground-truth boxes whose pixel area lies outside area_thres.

    targets: rows of (index, label, x1, y1, x2, y2).
    area_thres: optional (min_area, max_area) pair; None disables filtering.
    The function-level counter target_area_threshold.num_removed accumulates
    how many boxes were discarded across all calls.
    """
    if targets.shape[0] == 0:
        return targets
    if area_thres is not None:
        x1, y1, x2, y2 = targets[:, 2], targets[:, 3], targets[:, 4], targets[:, 5]
        # +1 treats coordinates as inclusive pixel indices.
        image_area = (x2 - x1 + 1) * (y2 - y1 + 1)
        # image_area = image_area.abs()
        targets = targets[image_area >= area_thres[0]]
        target_area_threshold.num_removed += (image_area < area_thres[0]).sum()
        if not targets.size(0):
            return targets
        image_area = image_area[image_area >= area_thres[0]]
        targets = targets[image_area <= area_thres[1]]
        target_area_threshold.num_removed += (image_area > area_thres[1]).sum()
    return targets
# Running total of discarded boxes (function attribute used as a counter).
target_area_threshold.num_removed=0
def target_non_max_suppression(targets, conf_thres=0.5, nms_thres=0.4, area_thres=None):
    """
    Merges ground-truth boxes of one stack via Non-Maximum Suppression.

    Note: conf_thres is currently unused — the confidence filter is
    commented out below; ground truth carries no confidence column.
    Overlapping same-label boxes (IoU > nms_thres) are averaged with equal
    weights; area_thres is an optional (min_area, max_area) filter.

    Returns detections with shape:
        (index, label, x1, y1, x2, y2)
    """
    # From (center x, center y, width, height) to (x1, y1, x2, y2)
    if targets.shape[0] == 0:
        return targets
    targets[:, 0] = 0 # we are going to merge everythin into one image, thus index is always zero
    targets = [targets]
    output = [ torch.empty((0,6)) for _ in range(len(targets))]
    for image_i, image_pred in enumerate(targets):
        # # Filter out confidence scores below threshold
        # image_pred = image_pred[image_pred[:, 4] >= conf_thres]
        # # If none are remaining => process next image
        # if not image_pred.size(0):
        #     continue
        if area_thres is not None:
            # If none are remaining => process next image
            if not image_pred.size(0):
                continue
            # Discard boxes outside the (min_area, max_area) range; the +1
            # treats coordinates as inclusive pixel indices.
            x1, y1, x2, y2 = image_pred[:, 2], image_pred[:, 3], image_pred[:, 4], image_pred[:, 5]
            image_area = (x2 - x1 + 1) * (y2 - y1 + 1)
            # image_area = image_area.abs()
            # TODO: find proper value for area_thres
            image_pred = image_pred[image_area >= area_thres[0]]
            if not image_pred.size(0):
                continue
            image_area = image_area[image_area >= area_thres[0]]
            image_pred = image_pred[image_area <= area_thres[1]]
            # if (image_area > area_thres[1]).sum() > 0:
            #     print(f'Discarded {(image_area > area_thres[1]).sum()} ground truth since it was too large')
            # If none are remaining => process next image
            if not image_pred.size(0):
                continue
        # Object confidence times class confidence
        # score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
        # Sort by it
        # image_pred = image_pred[(-score).argsort()]
        # class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
        # detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
        detections = image_pred
        # Perform non-maximum suppression
        keep_boxes = []
        while detections.size(0):
            large_overlap = bbox_iou(detections[0, 2:].unsqueeze(0), detections[:, 2:]) > nms_thres
            label_match = detections[0, 1] == detections[:, 1]
            # Indices of boxes with lower confidence scores, large IOUs and matching labels
            invalid = large_overlap & label_match
            weights = detections[invalid, 0:1] * 0 + 1 # hack to get the same shape with zeros
            # Merge overlapping bboxes by order of confidence
            detections[0, 2:] = (weights * detections[invalid, 2:]).sum(0) / weights.sum()
            keep_boxes += [detections[0]]
            detections = detections[~invalid]
        if keep_boxes:
            output[image_i] = torch.stack(keep_boxes)
    output = output[0]
    return output
def save_images_to_disk(path, img, detections, ground_truth, img_size, output_folder,
                        plot_detections=True, plot_annotations=True, clip_labels=False,
                        all_correct=None):
    """Render one image with predicted and/or ground-truth boxes and save it as PNG.

    Args:
        path: path of the source image on disk (the image is re-read from here;
            the ``img`` argument is kept for interface compatibility but unused).
        img: unused (see above).
        detections: tensor of rows (x1, y1, x2, y2, conf, cls_conf, cls_pred) in
            network coordinates, or None.
        ground_truth: array of rows (label, x1, y1, x2, y2) in network coordinates.
        img_size: network input size used by ``rescale_boxes``.
        output_folder: directory the PNG is written into.
        plot_detections / plot_annotations: toggle each overlay.
        clip_labels: clip patches to the axes box.
        all_correct: if True/False, stamp a 'good'/'bad' marker on the plot.

    Fixes over the original:
      * removed the stray ``plt.figure()`` before ``plt.subplots`` which leaked
        one empty figure per call,
      * removed the dead ``random.sample(colors, ...)`` color selection that was
        immediately overridden by ``color = 'green'``,
      * the output filename had been lost ("(unknown)"); it is rebuilt from the
        computed ``filename`` stem, which was previously computed but unused.
    """
    img = np.array(Image.open(path))
    fig, ax = plt.subplots(1)
    ax.imshow(img)
    inch_size = fig.get_size_inches()  # remember size: bbox_inches="tight" would shrink it
    # Draw bounding boxes of detections (always green).
    if detections is not None and plot_detections:
        # Rescale boxes from network coordinates to original image coordinates.
        detections = rescale_boxes(detections, img_size, img.shape[:2])
        for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
            bbox = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1,
                                     edgecolor='green', facecolor="none",
                                     clip_on=clip_labels, clip_box=ax.clipbox)
            ax.add_patch(bbox)
    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    # Draw ground-truth boxes in black. NOTE: rescaling mutates the caller's array.
    if ground_truth.size != 0 and plot_annotations:
        ground_truth[:, 1:] = rescale_boxes(ground_truth[:, 1:], img_size, img.shape[:2])
        for label, x1, y1, x2, y2 in ground_truth:
            bbox = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1,
                                     edgecolor='black', facecolor="none",
                                     clip_on=clip_labels, clip_box=ax.clipbox)
            ax.add_patch(bbox)
    # Stamp an overall verdict in the lower-left corner when requested.
    if all_correct is True:
        plt.text(0, 0, s='good', color="blue", transform=ax.transAxes,
                 clip_box=ax.clipbox, clip_on=clip_labels)
    elif all_correct is False:
        plt.text(0, 0, s='bad', color="red", transform=ax.transAxes,
                 clip_box=ax.clipbox, clip_on=clip_labels)
    # Save under the source image's basename (without extension).
    filename = path.split("/")[-1].split(".")[0]
    fig.set_size_inches(inch_size)
    plt.savefig(f"{output_folder}/{filename}.png", bbox_inches="tight", pad_inches=0.0)
    plt.close(fig)
def align_bb(images):
    """Align every image in *images* to the first image via an ECC homography.

    NOTE(review): the original body was broken/dead code — it iterated
    ``for i in len(images)`` (TypeError) and referenced undefined names
    ``image`` and ``M``.  Reconstructed here to estimate, for each image, the
    homography that maps it onto ``images[0]`` and warp it accordingly;
    confirm the intended reference/ordering before relying on this.

    Returns the list of aligned images (the first image is returned unchanged).
    """
    # ECC termination criteria: max iterations and correlation-increment threshold.
    number_of_iterations = 20
    termination_eps = 1e-10
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                number_of_iterations, termination_eps)
    reference_gray = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    aligned = [images[0]]
    for i in range(1, len(images)):
        # Start from the identity homography for every image.
        M = np.eye(3, 3, dtype=np.float32)
        # NOTE(review): the original passed images[i] without a gray conversion —
        # presumably the inputs were already single-channel; verify.
        s, M = cv2.findTransformECC(reference_gray,
                                    cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY),
                                    M, cv2.MOTION_HOMOGRAPHY, criteria, None, 1)
        h, w = images[i].shape[:2]
        aligned.append(cv2.warpPerspective(images[i], M, (w, h)))
    return aligned
def evaluate(model,
             path,
             iou_thres,
             conf_thres,
             nms_thres,
             img_size,
             batch_size,
             base_dir=None,
             merge=False,
             save_images=False,
             save_for_verification=False,
             blur_thres=None):
    """Run detection over the dataset at *path* and compute per-class AP metrics.

    Args:
        model: detection network; called as ``model(imgs)`` under ``no_grad``.
        path: dataset list file consumed by ``ListDataset``.
        iou_thres: IoU required for a prediction to count as a true positive.
        conf_thres / nms_thres: confidence and NMS thresholds.
        img_size: network input size (targets are scaled to this).
        batch_size: loader batch size (ignored when ``merge`` is True — then one
            batch is all layers of one sample, via ``GroupSampler``).
        base_dir: optional root prepended to dataset paths.
        merge: if True, merge predictions of all layers of a sample with a
            second NMS pass that requires agreement across layers.
        save_images: dump every image with overlays to ../output/images/.
        save_for_verification: dump mispredicted images to ../output/relabel/
            and fully correct ones to ../output/correct/ (incompatible with merge).
        blur_thres: if set, drop detections whose crop's Laplacian variance is
            below this value (blurry-segment filter).

    Returns:
        (precision, recall, AP, f1, ap_class) from ``ap_per_class``.
    """
    model.eval()
    # Get dataloader
    dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False,base_dir=base_dir,blur_thres=blur_thres)
    sampler = GroupSampler(dataset.img_files)
    if merge:
        # One batch == all images belonging to one sample (grouped by the sampler).
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_sampler=sampler, num_workers=1, collate_fn=dataset.collate_fn
        )
    else:
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
        )
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index
    # Function attribute used as a global counter by target_area_threshold.
    target_area_threshold.num_removed = 0
    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (index, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
        # One batch consists of all images belonging to one sample (e.g. having same timestamp)
        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        batch_size = imgs.shape[0]
        # print('Batch size',imgs.shape[0])
        with torch.no_grad():
            outputs = model(imgs)
        # outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
        length_thres = torch.Tensor([0.05*img_size, img_size]) # (min length, max length)
        area_thres = torch.Tensor([0.005*img_size*img_size, img_size*img_size]) # (min length, max length)
        # area_thres = length_thres*length_thres
        # print(targets) 60/1200
        # print(len(targets),len(outputs))
        # merge targets for each sample (prediction and ground truth)
        # targets = target_merge(targets,iou_thres)
        # Ground truth comes as normalized (x, y, w, h); convert to absolute corners.
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size
        # print(targets.shape)
        targets = target_area_threshold(targets,area_thres)
        if merge:
            targets = target_non_max_suppression(targets, conf_thres=conf_thres, nms_thres=nms_thres, area_thres=area_thres)
        labels += targets[:, 1].tolist()
        # # print(targets.shape)
        outputs[..., :4] = xywh2xyxy(outputs[..., :4])
        # print(img_paths)
        # print(len(outputs))
        # print(len(outputs[0]))
        # outputs = torch.cat(outputs,dim=0)
        # First NMS pass per image.
        outputs = output_non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres,area_thres=area_thres,min_detections=0)
        # print(targets)
        # print(outputs)
        # outputs = torch.cat(outputs)
        # outputs = outputs.view(1,-1,outputs.shape[-1])
        # # print(outputs.shape)
        # outputs = [o for o in outputs if o is not None]
        output_mask = [o is not None for o in outputs]
        if merge:
            # Second NMS pass across all layers of the sample; a detection must be
            # supported by at least 70% of the layers (min_detections).
            # index = index[output_mask]
            outputs = [o for o in outputs if o is not None]
            if len(outputs):
                outputs = torch.cat(outputs)
                outputs = outputs.view(1,-1,outputs.shape[-1])
                outputs = output_non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres,min_detections=batch_size*0.7)
        # # outputs = torch.cat(outputs)
        # # outputs = outputs.view(1,-1,outputs.shape[-1])
        # # print(outputs.shape)
        # # outputs = torch.cat(outputs).unsqueeze(0)
        # # outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
        if blur_thres is not None:
            # for each output extract the segment
            # Keep only detections whose crop is sharp enough (Laplacian variance).
            # print(len(outputs))
            new_outputs = []
            for i in range(imgs.shape[0]):
                image = imgs[i]
                # print(image.max())
                # imag = cv2.cvtColor(image.permute(1,2,0).cpu().numpy(), cv2.COLOR_RGB2BGR)
                # cv2.imwrite(f'../output/tmp/image_{batch_i}_{i}.png',imag*255)
                output = outputs[i]
                if output is None:
                    new_outputs += [None]
                    continue
                mask = [False for _ in range(len(output))]
                bboxes = output[:,:4].clone()
                bboxes = bboxes.round()
                # Clamp boxes to the image so slicing below cannot go out of range.
                bboxes[bboxes < 0] = 0
                bboxes[bboxes >= img_size] = img_size-1
                for j in range(bboxes.shape[0]):
                    x1, y1, x2, y2 = int(bboxes[j, 0]), int(bboxes[j, 1]), int(bboxes[j, 2]), int(bboxes[j, 3])
                    segments = image[:, y1:y2, x1:x2]
                    segments_gray = cv2.cvtColor(segments.permute(1,2,0).cpu().numpy(), cv2.COLOR_RGB2GRAY)
                    lvar = cv2.Laplacian(segments_gray, cv2.CV_32F, ksize=3).var()
                    # cv2.imwrite(f'../output/tmp/out_{batch_i}_{i}_{j}_{lvar}.png',segments_gray*255)
                    if lvar > blur_thres:
                        mask[j] = True
                new_outputs += [output[mask]]
            outputs = new_outputs
        # cv2.Laplacian(image, cv2.CV_64F).var()
        # deepcopy/clone: get_batch_statistics must not mutate outputs/targets.
        sample_metric = get_batch_statistics(copy.deepcopy(outputs), targets.clone(), iou_threshold=iou_thres)
        sample_metrics += sample_metric
        # Extract labels
        # Rescale target
        if save_images:
            os.makedirs('../output/images/',exist_ok=True)
            for i in range(imgs.shape[0]):
                # get the test results
                metric = sample_metric[i]
                true_positives = metric[0]
                annotations = targets[targets[:, 0] == i][:, 1:].cpu().numpy()
                num_gt = len(annotations)
                num_pred = len(outputs[i]) if outputs[i] is not None else 0
                # "all correct" == every prediction is a TP and counts match.
                if np.array(true_positives).all() and num_pred == num_gt:
                    all_correct = True
                else:
                    all_correct = False
                # open the original files
                img_path = Path(dataloader.dataset.img_files[index[i]])
                labels_path = Path(dataloader.dataset.label_files[index[i]])
                save_images_to_disk(dataloader.dataset.img_files[index[i]],imgs[i].cpu().numpy().copy(),outputs[i],annotations,img_size,'../output/images/',clip_labels=True,all_correct=all_correct)
                if labels_path.exists():
                    copyfile(labels_path, '../output/images/'+labels_path.name)
        # print(sample_metrics)
        if save_for_verification:
            if merge:
                raise RuntimeError('Cannot use save_for_verification while `merge` is set to True')
            os.makedirs('../output/relabel/',exist_ok=True)
            os.makedirs('../output/correct/',exist_ok=True)
            assert len(index)==len(sample_metric)
            for i in range(len(outputs)):
                metric = sample_metric[i]
                true_positives = metric[0]
                img_path = Path(dataloader.dataset.img_files[index[i]])
                labels_path = Path(dataloader.dataset.label_files[index[i]])
                # save false positives
                annotations = targets[targets[:, 0] == i][:, 1:].cpu().numpy()
                # print(img_path)
                # print(annotations)
                # print(len(true_positives))
                # print(len(annotations))
                if not np.array(true_positives).all() and true_positives.size > 0:
                    save_images_to_disk(str(img_path),imgs[i].cpu().numpy().copy(),outputs[i],annotations.copy(),img_size,'../output/relabel/',clip_labels=True)
                    #save_images_to_disk([str(img_path)],imgs[i:i+1].cpu().numpy().copy(),copy.deepcopy(outputs),targets.clone(),img_size,'../output/relabel/')
                    # copyfile(img_path, '../output/relabel/'+img_path.name)
                    if labels_path.exists():
                        copyfile(labels_path, '../output/relabel/'+labels_path.name)
                    # print(f'FP, copied file {img_path}')
                elif np.array(true_positives).all() and true_positives.size > 0:
                    save_images_to_disk(str(img_path),imgs[i].cpu().numpy().copy(),outputs[i],annotations.copy(),img_size,'../output/correct/',clip_labels=True)
                # save false negatives
                gt = len(annotations)
                num_pred = len(outputs[i]) if outputs[i] is not None else 0
                if num_pred != gt:
                    save_images_to_disk(str(img_path),imgs[i].cpu().numpy().copy(),outputs[i],annotations.copy(),img_size,'../output/relabel/',clip_labels=True)
                    # save_images_to_disk([str(img_path)],imgs[i:i+1].cpu().numpy().copy(),copy.deepcopy(outputs),targets.clone(),img_size,'../output/relabel/')
                    # copyfile(img_path, '../output/relabel/'+img_path.name)
                    if labels_path.exists():
                        copyfile(labels_path, '../output/relabel/'+labels_path.name)
                    # print(f'FN, copied file {img_path}')
        # if (targets[:,1]==1).any():
        # break
        print('Number of targets removed',target_area_threshold.num_removed)
        # break
    # Concatenate sample statistics
    if len(sample_metrics) == 0:
        true_positives, pred_scores, pred_labels = np.array([]), np.array([]), np.array([])
    else:
        true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    print('True positives',true_positives.sum())
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
    return precision, recall, AP, f1, ap_class
if __name__ == "__main__":
    # CLI entry point: load a Darknet model and report mAP on the validation set.
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=16, help="size of each image batch")
    parser.add_argument("--data_config", type=str, default="config/test_20190523.data", help="path to data config file")
    parser.add_argument("--model_def", type=str, default="config/yolov3-pollen.cfg", help="path to model definition file")
    parser.add_argument("--weights_path", type=str, default="../weights/pollen_20190526.pth", help="path to weights file")
    # FIX: the default previously contained a trailing space ("...classes.names "),
    # which can never resolve to an existing file.
    parser.add_argument("--class_path", type=str, default="config/pollen/classes.names", help="path to class label file")
    parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
    parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.5, help="iou threshold for non-maximum suppression")
    parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    parser.add_argument("--merge", action='store_true', help="if True every layer of a sample will be merged")
    parser.add_argument("--intheloop", action='store_true', help="stores false positives and false negatives")
    parser.add_argument("--save_images", action='store_true', help="if True every layer will be saved")
    parser.add_argument("--blur_thres", type=float, default=None, help="laplacian variance threshold blurry image removal. If None, nothing will be removed")
    opt = parser.parse_args()
    print(opt)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Resolve dataset paths and class names from the data config.
    data_config = parse_data_config(opt.data_config)
    valid_path = data_config["valid"]
    if "base_dir" in data_config:
        base_dir_path = data_config["base_dir"]
    else:
        base_dir_path = None
    class_names = load_classes(data_config["names"])

    # Initiate model
    model = Darknet(opt.model_def).to(device)
    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path, map_location=device))

    print("Compute mAP...")
    precision, recall, AP, f1, ap_class = evaluate(
        model,
        path=valid_path,
        base_dir=base_dir_path,
        iou_thres=opt.iou_thres,
        conf_thres=opt.conf_thres,
        nms_thres=opt.nms_thres,
        img_size=opt.img_size,
        batch_size=opt.batch_size,
        merge=opt.merge,
        save_images=opt.save_images,
        save_for_verification=opt.intheloop,
        blur_thres=opt.blur_thres,
    )

    print("Average Precisions:")
    for i, c in enumerate(ap_class):
        print(f"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]}")
    print(f"mAP: {AP.mean()}")
    print('F1', f1)
|
osaukh/pollenpub | code/utils/logger.py | <gh_stars>1-10
'''Copyright (C) 2019 <NAME>, Swiss Federal Institute of Technology (ETH Zurich), <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Changes to original version: Adopted to work with multi-layer pollen data
'''
import tensorboard_logger as tb_logger
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
# self.writer = tf.summary.FileWriter(log_dir)
self.logger = tb_logger.Logger(logdir=log_dir, flush_secs=2)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
# summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
# self.writer.add_summary(summary, step)
self.logger.log_value(tag, value, step)
def list_of_scalars_summary(self, tag_value_pairs, step):
"""Log scalar variables."""
# summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value) for tag, value in tag_value_pairs])
# self.writer.add_summary(summary, step)
for tag, value in tag_value_pairs:
self.logger.log_value(tag, value, step)
|
osaukh/pollenpub | code/utils/parse_config.py | '''Copyright (C) 2019 <NAME>, Swiss Federal Institute of Technology (ETH Zurich), <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Changes to original version: Adopted to work with multi-layer pollen data
'''
def parse_model_config(path):
    """Parses the yolo-v3 layer configuration file and returns module definitions.

    Each ``[section]`` header starts a new definition dict (with its ``type``);
    subsequent ``key=value`` lines fill that dict. Convolutional sections get a
    default ``batch_normalize = 0`` that a later line may override (as a string).

    Fix: the config file is opened with a context manager so the handle is
    always closed (the original leaked the open file object).
    """
    with open(path, 'r') as file:
        lines = file.read().split('\n')
    lines = [x for x in lines if x and not x.startswith('#')]
    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces
    module_defs = []
    for line in lines:
        if line.startswith('['):  # This marks the start of a new block
            module_defs.append({})
            module_defs[-1]['type'] = line[1:-1].rstrip()
            if module_defs[-1]['type'] == 'convolutional':
                module_defs[-1]['batch_normalize'] = 0
        else:
            key, value = line.split("=")
            value = value.strip()
            module_defs[-1][key.rstrip()] = value.strip()
    return module_defs
def parse_data_config(path):
    """Parse the data configuration file into a dict of stripped key/value pairs.

    Lines that are blank or start with '#' are ignored; 'gpus' and
    'num_workers' carry built-in defaults.
    """
    options = {'gpus': '0', 'num_workers': '10'}
    with open(path, 'r') as fp:
        for raw_line in fp.readlines():
            entry = raw_line.strip()
            if not entry or entry.startswith('#'):
                continue
            key, value = entry.split('=')
            options[key.strip()] = value.strip()
    return options
|
JustHitTheCore/JHtC4BSK2017 | public/phdh/generate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from random import randint
from Crypto.Util.number import isPrime, getPrime
from hashlib import sha256
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
def encrypt(key, plaintext, associated_data):
    """AES-GCM encrypt *plaintext* under *key*, authenticating *associated_data*.

    Returns a (iv, ciphertext, tag) triple with a freshly generated 12-byte IV.
    """
    iv = os.urandom(12)
    cipher = Cipher(
        algorithms.AES(key),
        modes.GCM(iv),
        backend=default_backend(),
    )
    encryptor = cipher.encryptor()
    encryptor.authenticate_additional_data(associated_data)
    ciphertext = encryptor.update(plaintext) + encryptor.finalize()
    return (iv, ciphertext, encryptor.tag)
def generate_smooth_prime(n_bit_size, g, smooth_bit_size=50):
    """Generate a prime n of about n_bit_size bits whose order n-1 is smooth,
    such that g is a primitive root modulo n.

    Returns (n, factors) where factors maps each prime factor of n-1 to its
    multiplicity.

    Fix: a primitive-root test must check g^((n-1)/f) != 1 (mod n) for every
    prime factor f of n-1. The original divided by f**power, which is a
    strictly weaker condition and could accept a g that is not a generator.
    """
    while True:
        n = 2
        factors = {2: 1}
        # Multiply small random primes until roughly 2*smooth_bit_size bits remain.
        while n.bit_length() < n_bit_size - 2 * smooth_bit_size:
            q = getPrime(smooth_bit_size)
            n *= q
            if q in factors:
                factors[q] += 1
            else:
                factors[q] = 1
        # Top up with one larger prime q such that n*q + 1 is prime.
        smooth_bit_size_padded = n_bit_size - n.bit_length()
        while True:
            q = getPrime(smooth_bit_size_padded)
            if isPrime((n * q) + 1):
                n = (n * q) + 1
                if q in factors:
                    factors[q] += 1
                else:
                    factors[q] = 1
                break
        # g is a primitive root mod n iff g^((n-1)/f) != 1 for every prime f | n-1.
        is_primitive_root = True
        for factor in factors:
            if pow(g, (n - 1) // factor, n) == 1:
                is_primitive_root = False
                break
        if is_primitive_root:
            return n, factors
if __name__ == "__main__":
    # CTF challenge generator (Python 2 only: uses str.encode('hex') and relies
    # on map() returning a list). Produces a DH transcript whose prime has a
    # smooth order, making the discrete log solvable via Pohlig-Hellman.
    FLAG = 'JHtC4BSK{...}'
    template = '''
Alice <- p: {} -> Bob
Alice <- g: {} -> Bob
Alice -> g^a (mod p): {} -> Bob
Alice <- g^b (mod p): {} <- Bob
Alice -> aes_gcm_encrypt(key, flag): {} -> Bob
EOT
'''[1:]
    g = 2
    # 1024-bit prime p with 20-bit-smooth p-1 and primitive root g.
    p, p_order_factors = generate_smooth_prime(1024, g, 20)
    a, b = randint(2, p-1), randint(2, p-1)
    A, B = pow(g, a, p), pow(g, b, p)
    shared_dh_key = pow(A, b, p)
    assert pow(B, a, p) == shared_dh_key
    # Key = SHA-256 of the decimal string of the shared secret.
    key = sha256(str(shared_dh_key)).digest()
    flag_encrypted = encrypt(key, FLAG, '')
    flag_encrypted = map(lambda x: x.encode('hex'), flag_encrypted)
    # NOTE(review): the (iv, ct, tag) hex triple is written as its list repr
    # into a single placeholder — presumably intentional for the challenge.
    with open('communication.txt', 'w') as f:
        f.write(template.format(p, g, A, B, flag_encrypted))
|
JustHitTheCore/JHtC4BSK2017 | hosted/barsa/src/app.py | from flask import Flask
from flask import render_template
from flask import make_response
from flask import request
from flask import redirect
from flask import url_for
from flask import send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from CryptoLib import SecureKey
import sys, re, os
from random import randint
import string
#---CONFIG---
# Application bootstrap (Python 2: note the print statement below).
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
# Path of the admin-only flag image served by send_images().
FLAG = '/images/fc96f22d5230d38d0a81d2efc86ef5c051ecb811b4b601e2bb18909f0064fb92.gif'
# RSA-style keys: enc_key encrypts the auth cookie, sig_key signs it.
enc_key = SecureKey()
sig_key = SecureKey()
if os.path.isfile('encryption_key.sec') and os.path.isfile('signing_key.sec'):
    enc_key.importKey('encryption_key.sec')
    sig_key.importKey('signing_key.sec')
    # print "Keys imported"
else:
    # Keys must be pre-generated (see the __main__ block); refuse to start otherwise.
    print "Keys not found"
    sys.exit(1)
#---MODEL---
class User(db.Model):
    """Registered account; ``flag`` stores the per-user reward image path."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30), unique=True)
    # bcrypt hash of the password.
    # NOTE(review): unique=True on a salted hash column is odd — verify intent.
    password = db.Column(db.String(100), unique=True)
    flag = db.Column(db.String(120))
    def __init__(self, username, password, flag):
        self.username = username
        self.password = password
        self.flag = flag
    def __repr__(self):
        return '<User %r>' % self.username
#---CRYPTO FUNCTIONS---
def encrypt(plain):
    """Encrypt-then-sign *plain*; returns 'hex(ciphertext)--hex(signature)'."""
    ciphertext = enc_key.encrypt(plain)
    signature = sig_key.sign(ciphertext)
    return '--'.join([ciphertext.encode('hex'), signature.encode('hex')])
def decrypt(cip):
    """Verify and decrypt an auth token produced by encrypt().

    Returns the plaintext, or False when the token is malformed or its
    signature does not verify.

    Fix: catch ``Exception`` instead of a bare ``except`` so SystemExit and
    KeyboardInterrupt are no longer swallowed; the deliberate best-effort
    "return False on any parse/crypto error" behavior is preserved.
    """
    try:
        encrypted, signature = map(lambda x: x.decode('hex'), cip.split('--'))
        if sig_key.verify(encrypted, signature) == False:
            return False
        return enc_key.decrypt(encrypted)
    except Exception:
        return False
def get_user(auth):
    """Resolve an auth cookie value to a User row, or None if absent/invalid."""
    if not auth:
        return None
    username = decrypt(auth)
    if username is False:
        return None
    return User.query.filter_by(username=username).first()
#---MAIN APP---
@app.route('/css/<path:filename>')
def send_css(filename):
    """Serve static CSS assets from ./static/css."""
    css_dir = os.path.join(os.getcwd(), 'static', 'css')
    return send_from_directory(css_dir, filename)
@app.route('/images/<path:filename>')
def send_images(filename):
    """Serve image assets; the flag GIF is only served to the admin account."""
    if '/images/' + filename == FLAG:
        viewer = get_user(request.cookies.get('auth'))
        if viewer is None or viewer.username != 'admin':
            return redirect(url_for('index'))
    images_dir = os.path.join(os.getcwd(), 'static', 'images')
    return send_from_directory(images_dir, filename)
@app.route('/robots.txt')
def send_robots():
    """Serve robots.txt from ./static."""
    return send_from_directory(os.path.join(os.getcwd(), 'static'), 'robots.txt')
@app.route('/do_not_look_at_me.zip')
def send_do_not():
    """Serve the decoy archive from ./static."""
    return send_from_directory(os.path.join(os.getcwd(), 'static'), 'do_not_look_at_me.zip')
@app.route("/flag", methods=['GET'])
def flag():
    """Decoy endpoint: renders the page as the fake 'flag' user."""
    decoy = User.query.filter_by(username='flag').first()
    return render_template('index.html', user=decoy)
@app.route("/", methods=['GET'])
def index():
    """Landing page; shows the logged-in user when the auth cookie is valid."""
    current = get_user(request.cookies.get('auth'))
    return render_template('index.html', user=current)
@app.route("/register", methods=['GET', 'POST'])
def register():
    """Create a new account, assign a random reward image, and log the user in.

    Fix: the password-hashing call had been mangled to the literal
    ``<PASSWORD>.generate_password_hash`` (invalid syntax); restored to
    ``bcrypt`` (the same object login() verifies against).
    """
    if get_user(request.cookies.get('auth')):
        return redirect(url_for('index'))
    if request.method == 'POST' and 'username' in request.form and 'password' in request.form:
        username = request.form['username']
        password = request.form['password']
        # Validation checks run in sequence; a later failure overwrites the
        # earlier message (original behavior, kept).
        error = ''
        if len(password) < 5:
            error = "Password length must be greater than 5 chars"
        if len(username) < 5:
            error = "Username length must be greater than 5 chars"
        if len(username) >= 25:
            error = "Username length must be lower than 25 chars"
        if re.search("[^a-zA-Z0-9_]", username) != None:
            error = "Username in [a-zA-Z0-9_]"
        if error:
            return render_template('register.html', error=error)
        if User.query.filter_by(username=username).first() != None:
            return render_template('register.html', error='User already exists')
        # Every non-admin user gets one of seven decoy reward images.
        user_flag = '/images/mp'+str(randint(1,7))+'.gif'
        password = bcrypt.generate_password_hash(password)
        user = User(username, password, user_flag)
        db.session.add(user)
        db.session.commit()
        auth = encrypt(username)
        resp = make_response(render_template('index.html', user=user, flash='Successfully registered'))
        resp.set_cookie('auth', auth)
        return resp
    else:
        return render_template('register.html')
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user against the bcrypt hash and set the auth cookie."""
    if get_user(request.cookies.get('auth')):
        return redirect(url_for('index'))
    if request.method != 'POST' or 'username' not in request.form or 'password' not in request.form:
        return render_template('login.html')
    username = request.form['username']
    password = request.form['password']
    user = User.query.filter_by(username=username).first()
    if user is None or not bcrypt.check_password_hash(user.password, password):
        return render_template('login.html', error='Invalid username/password')
    resp = make_response(render_template('index.html', user=user, flash='Successfully logged in'))
    resp.set_cookie('auth', encrypt(username))
    return resp
@app.route("/logout", methods=['GET'])
def logout():
    """Expire the auth cookie when one is present."""
    if not request.cookies.get('auth'):
        return redirect(url_for('index'))
    resp = make_response(render_template('index.html', flash='You were successfully logged out'))
    resp.set_cookie('auth', '', expires=0)
    return resp
if __name__ == "__main__":
db.drop_all()
db.create_all()
alphabet = string.printable[:-38]
password = ''.join([alphabet[randint(0, len(alphabet)-1)] for _ in range(40)])
print "admin password:", repr(password)
password = bcrypt.generate_password_hash(password)
admin = User('admin', password, FLAG)
db.session.add(admin)
password = '<PASSWORD>'
password = <PASSWORD>.generate_password_hash(password)
fake_flag = User('flag', password, '/images/flag.gif')
db.session.add(fake_flag)
db.session.commit()
enc_key = SecureKey()
sig_key = SecureKey()
if os.path.isfile('encryption_key.sec') and os.path.isfile('signing_key.sec'):
enc_key.importKey('encryption_key.sec')
sig_key.importKey('signing_key.sec')
print "Keys imported"
else:
enc_key.generate()
enc_key.exportKey('encryption_key.sec')
sig_key.generate()
sig_key.exportKey('signing_key.sec')
print "Keys generated"
app.run(host = '0.0.0.0') |
JustHitTheCore/JHtC4BSK2017 | hosted/power_man/src/pow_test.py | <reponame>JustHitTheCore/JHtC4BSK2017
#!/usr/bin/env python3
from random import randint
# CTF proof-of-work: answer 120 modular-exponentiation queries quickly,
# then the flag is revealed.
print("Calculate a**b mod m")
print("You don't have a lot of time!")
for _ in range(120):
    a = randint(10**100, 10**200)
    b = randint(10**100, 10**200)
    m = randint(10**100, 10**200)
    for name, value in (("a", a), ("b", b), ("m", m)):
        print(name, "=", value)
    print("ans:")
    answer = int(input())
    if answer != pow(a, b, m):
        print("Nope!")
        exit(1)
with open("flag.txt", 'r') as fin:
    print(fin.read())
|
JustHitTheCore/JHtC4BSK2017 | hosted/translatespeak/src/app.py | <reponame>JustHitTheCore/JHtC4BSK2017
# <!--
import os
import shlex
import subprocess
import logging
from uuid import uuid4
from flask import Flask, request, redirect
from googletrans import Translator
# Application setup for the translate-and-speak CTF service.
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
# First flag is injected through the environment at deploy time.
flag_1 = os.environ['JHtC4BSK_FIRST_FLAG']
# Raw HTML form rendered on every page (no templating engine used).
base = """
<form action="/translate">
Translate string:<br>
<input type="text" name="translate" value=""><br/>
Source lang:<br>
<input type="text" name="src" value="pl"><br/>
Dest lang:<br>
<input type="text" name="dst" value="en"><br/>
<input type="submit" value="Submit">
</form>
"""
# Placeholder snippet, currently unused.
hear = """
"""
@app.route('/')
def root():
    """Render the translation form."""
    return base
# Directory where generated speech files are written and served back from.
TMP_PATH = '/tmp'
@app.route('/translate')
def translate():
    """Translate the query string, synthesize speech for it, and render both.

    Security notes (this is a deliberately vulnerable CTF service — do not
    "fix" without changing the challenge): the input is shell-quoted with
    shlex.quote but still executed via shell=True, and tr.origin/tr.text are
    interpolated into HTML unescaped (XSS).
    """
    string = request.args.get('translate')
    dst = request.args.get('dst', 'en')
    src = request.args.get('src', 'pl')
    if string:
        # Cap input length before handing it to the translator / espeak.
        string = string[:100]
        tr = Translator().translate(string, dest=dst, src=src)
        # Random output filename under /tmp for the synthesized audio.
        fname = os.path.join(TMP_PATH, str(uuid4()))
        try:
            cmd = 'espeak --stdout {}'.format(shlex.quote(string))
            cmd += ' > {0}'
            cmd = cmd.format("'" + fname + "'")
            logging.info('Trying to invoke %s' % cmd)
            subprocess.check_output(cmd, shell=True, env={})
        except Exception as e:
            # NOTE(review): fname = None is dead — the re-raise below means the
            # "if fname:" branch is never reached on failure.
            fname = None
            raise e
        render = base + '<br><br>Translated %s to %s' % (tr.origin, tr.text)
        if fname:
            render += '<br><br>Download espeak <a href="%s">here</a>' % fname
            render += """
<br>
<script>
var audio = new Audio('%s');
audio.play();
</script>
""" % fname
        return render
    return ''
@app.route(TMP_PATH + '/<filename>')
def tmp(filename):
    """Serve a generated speech file back from /tmp.

    Security note (intentional for the CTF): *filename* is joined without
    normalization, and only names containing 'flag' are blocked.
    """
    if 'flag' in filename: # /tmp/flag_2, /tmp/flag_3
        return 'lol no'
    with open(os.path.join(TMP_PATH, filename), 'rb') as f:
        return f.read()
# fake server
@app.route('/robots.txt')
def robots():
    """Serve robots.txt through the caching file reader."""
    path = os.path.realpath('robots.txt')
    return cachedfile(path)
@app.route('/backup')
def backup():
    """Hidden 'backup viewer' gated on a magic User-Agent header.

    Security note (intentional for the CTF): the 'fname' query parameter is an
    arbitrary-file-read primitive; only paths containing 'flag_3' are blocked.
    """
    if request.headers.get('User-Agent') != 'magic':
        # Wrong UA: teapot-redirect to a rickroll.
        return redirect('https://www.youtube.com/watch?v=dQw4w9WgXcQ', code=418)
    # Defaults to this source file itself.
    filename = request.args.get('fname', os.path.realpath(__file__))
    if 'flag_3' in filename:
        return 'lol no'
    return base + cachedfile(filename)
# In-memory cache: absolute path -> file contents (never evicted).
cache = {}


def cachedfile(fname):
    """Return the text contents of *fname*, memoized in the module-level cache.

    Returns an HTML comment placeholder when the file does not exist.
    """
    print("Requesting ", fname)
    if fname in cache:
        return cache[fname]
    try:
        with open(fname) as f:
            print('Saving file %s in cache' % fname)
            cache[fname] = f.read()
    except FileNotFoundError:
        return '<!-- File not found, sorry --!>'
    return cache[fname]
if __name__ == '__main__':
    # Development entry point only; debug=True must not be used in production.
    app.run(debug=True)
|
JustHitTheCore/JHtC4BSK2017 | hosted/barsa/src/CryptoLib.py | <gh_stars>1-10
#!/usr/bin/env python
from random import randint, randrange
import subprocess, json
from hashlib import sha256
# NOTE: deliberately shadows the builtin `bin` for the rest of this module so
# every caller gets binary digit strings without the '0b' prefix.
old_bin = bin
def bin(a):
    """Return the builtin bin(a) with the leading '0b' stripped."""
    return old_bin(a)[2:]
def eh(a):
    """Encode to hex: Python 2 str -> hex digits; int/long -> hex without
    the '0x' prefix or trailing 'L'."""
    try:
        hexed = a.encode('hex')
    except:
        # Integers have no .encode('hex'); fall back to hex() and tidy it.
        hexed = hex(a)[2:].strip('L')
    return hexed
def dh(a):
    """Decode a hex string to bytes (Python 2), left-padding with '0' when the
    digit count is odd."""
    stripped = a.strip()
    if len(stripped) % 2:
        stripped = '0' + stripped
    return stripped.decode('hex')
def egcd(a, b):
    """Extended Euclid: return (g, x, y) with a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        # Base case: gcd(0, b) = b = 0*0 + 1*b.
        return (b, 0, 1)
    g, y, x = egcd(b % a, a)
    coeff_a = x - (b // a) * y
    return (g, coeff_a, y)
def modinv(a, m):
    """Return the multiplicative inverse of a modulo m.

    Raises Exception when gcd(a, m) != 1 (no inverse exists).
    """
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
def gcd(a,b,k,seed):
    """Compute gcd(a, b) -- but only report it when the top bits of p (= b+1)
    are below *seed*; otherwise return 2.

    NOTE(review): despite the name this is NOT a plain gcd — the seed
    comparison gates the coprimality check used during key generation (part of
    this module's deliberately backdoored CTF keygen).  Python 2 only: `k/4`
    relies on integer division (it would be a float slice index on Python 3).
    """
    # Remember p = b + 1 before the Euclid loop destroys b.
    p=b+1
    while b:
        a, b = b, a%b
    # Compare the top k/4+32 bits of p (as an integer) against the seed gate.
    if int(bin(p)[:k/4+32],2) < seed:
        return a
    else:
        return 2
def random_nbit_integer(n):
    """Return a uniformly random integer with bit length exactly n."""
    lowest = 2 ** (n - 1)
    highest = 2 ** n - 1
    return randint(lowest, highest)
def pi(b, a):
    """Return a**17 mod b (fixed exponent; note the (modulus, base) argument order)."""
    exponent = 17
    return pow(a, exponent, b)
def is_prime(p):
    """Miller-Rabin probabilistic primality test with 6 random rounds.

    Fixes over the original: p == 2 was wrongly rejected (p % 2 == 0 fired
    first), and p <= 1 hung forever trying to split d == 0 into 2**s * d.
    Small inputs are now answered directly; large odd inputs behave as before.
    """
    if p < 2:
        return False
    if p in (2, 3):
        return True
    if p % 2 == 0:
        return False
    # Write p - 1 as 2**s * d with d odd.
    s = 0
    d = p - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    def try_composite(a):
        # Returns True when `a` witnesses that p is composite.
        if pow(a, d, p) == 1:
            return False
        for i in range(s):
            if pow(a, 2**i * d, p) == p - 1:
                return False
        return True
    for _ in range(6):
        a = randrange(2, p)
        if try_composite(a):
            return False
    return True
def fast_multiply(p, qv, k, seed):
    """Build an RSA modulus n = p*q whose middle bits embed pi(seed, top-bits-of-p).

    NOTE(review): this is the deliberately backdoored keygen of this CTF
    module — the embedded block lets whoever knows `seed` recover p from n.
    Python 2 only: `k/8`, `k/4`, `n/p` and `(5*k)/8` all rely on integer
    division; `q^m` is XOR, not exponentiation.
    """
    e = 65537
    # Start from the "victim" product and splice the marker into its bit string:
    # top k/8 bits of p*qv, then pi(seed, top bits of p), then low bits of p*qv.
    nv = p*qv
    t = bin(nv)[:k/8]
    u = bin(pi(seed, int(bin(p)[:k/4+32],2)))
    u = '0'*(k/4+32 - len(u)) + u
    l = bin(nv)[-(5*k)/8 + 32:]
    n = int(t + u + l, 2)
    # Derive a q near n/p (forced odd), then perturb it by random XOR masks
    # until q is prime and e is coprime to q-1.
    q = n/p + 1 - ((n/p)%2)
    while gcd(e, q-1, k, float('inf')) > 1 or not is_prime(q):
        m = random_nbit_integer(k/8-40)
        m += (1+(m%2)) / 2
        q = q^m
    n = p*q
    return n
def random_nbit_prime(n):
    """Draw random n-bit integers until one passes the Miller-Rabin test."""
    while True:
        candidate = random_nbit_integer(n)
        if is_prime(candidate):
            return candidate
class SecureKey:
    """RSA-style key object with textbook (unpadded) encrypt/decrypt/sign/verify.

    Python 2 only: generate() references `long` and relies on `/` integer
    division. Keys are serialized as plain JSON (including the private part).
    NOTE(review): key generation routes through fast_multiply(), which embeds a
    seed-dependent marker in the modulus — this is the module's intentional
    CTF backdoor, not an accident.
    """
    def __init__(self):
        # Parameter dict: n, e and (for private keys) d, p, q.
        self.params = {}
    def importKey(self, filename):
        """Load key parameters from a JSON file written by exportKey()."""
        self.params = json.loads(open(filename).read())
    def encrypt(self, plain):
        """Textbook RSA encrypt: bytes -> bytes, no padding (malleable!)."""
        if 'e' not in self.params or 'n' not in self.params:
            raise ValueError("import or generate key first")
        plain = int(eh(plain), 16)
        if plain >= self.params['n']:
            raise ValueError("Too large text")
        cip = pow(plain, self.params['e'], self.params['n'])
        return dh(eh(cip))
    def decrypt(self, cip):
        """Textbook RSA decrypt: inverse of encrypt() under the private key."""
        if 'd' not in self.params or 'n' not in self.params:
            raise ValueError("import or generate key first")
        cip = int(eh(cip), 16)
        if cip >= self.params['n']:
            raise ValueError("Too large text")
        plain = pow(cip, self.params['d'], self.params['n'])
        return dh(eh(plain))
    def sign(self, text):
        """Sign SHA-256(text) by RSA-decrypting the digest (no padding scheme)."""
        text = sha256(text).digest()
        return self.decrypt(text)
    def verify(self, text, signature):
        """Return True iff signature**e mod n equals SHA-256(text)."""
        if 'e' not in self.params or 'n' not in self.params:
            raise ValueError("import or generate key first")
        text = sha256(text).digest()
        text = int(eh(text), 16)
        signature = int(eh(signature), 16)
        if pow(signature, self.params['e'], self.params['n']) == text:
            return True
        return False
    def generate(self, e=65537, size=1024):
        """Generate a 1024-bit keypair (via the backdoored fast_multiply)."""
        if type(e) not in [int, long]:
            raise ValueError("Bad e")
        if e < 3:
            raise ValueError("Too small e")
        if size != 1024:
            raise ValueError("Only 1024 size is implemented at the moment")
        k = size
        # Fixed seed gating gcd() and feeding the marker embedded in n.
        seed = 476283116406539741845175463956657874046958850596520333086272652099928678076182181180321
        while True:
            p = random_nbit_prime(k/2)
            if gcd(e, p-1, k, seed) == 1:
                break
        q = random_nbit_prime(k/2)
        n = fast_multiply(p,q,k,seed)
        # Private exponent from the actual factorization (q is n/p, not the drawn q).
        d = modinv(e, (p-1)*(n/p-1))
        self.params['e'] = e
        self.params['n'] = n
        self.params['p'] = p
        self.params['q'] = n/p
        self.params['d'] = d
    def exportKey(self, filename):
        """Write the key parameters (public, plus private when present) as JSON."""
        data = {}
        if 'e' not in self.params or 'n' not in self.params:
            raise ValueError("import or generate params first")
        data['e'] = self.params['e']
        data['n'] = self.params['n']
        if 'd' in self.params and 'p' in self.params and 'q' in self.params:
            data['p'] = self.params['p']
            data['q'] = self.params['q']
            data['d'] = self.params['d']
        with open(filename, 'w') as f:
            f.write(json.dumps(data))
    def getParams(self):
        """Return (n, e, d, p, q); raises KeyError for public-only keys."""
        return self.params['n'], self.params['e'], self.params['d'], self.params['p'], self.params['q']
|
gadial/qiskit-terra | qiskit/pulse/transforms/canonicalization.py | <reponame>gadial/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Basic rescheduling functions which take schedule or instructions and return new schedules."""
import warnings
from collections import defaultdict
from typing import List, Optional, Iterable, Union
import numpy as np
from qiskit.pulse import channels as chans, exceptions, instructions
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.exceptions import UnassignedDurationError
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock, ScheduleComponent
def block_to_schedule(block: ScheduleBlock) -> Schedule:
    """Lower a ``ScheduleBlock`` into a concrete ``Schedule``.

    Args:
        block: The block program to lower.

    Returns:
        The scheduled pulse program.

    Raises:
        UnassignedDurationError: When any instruction duration is still unbound.
    """
    if not block.is_schedulable():
        raise UnassignedDurationError(
            'All instruction durations should be assigned before creating `Schedule`.'
            'Please check `.parameters` to find unassigned parameter objects.')
    lowered = Schedule(name=block.name, metadata=block.metadata)
    for element in block.instructions:
        # Nested blocks are lowered recursively before being appended.
        entry = block_to_schedule(element) if isinstance(element, ScheduleBlock) else element
        lowered.append(entry, inplace=True)
    # The block's alignment policy finally assigns concrete start times.
    return block.alignment_context.align(lowered)
def compress_pulses(schedules: List[Schedule]) -> List[Schedule]:
    """Optimization pass that deduplicates identical pulses across schedules.

    Args:
        schedules: Schedules to compress.

    Returns:
        Schedules in which equal pulses share a single canonical pulse object.
    """
    canonical_pulses = []
    compressed = []
    for sched in schedules:
        rebuilt = Schedule(name=sched.name, metadata=sched.metadata)
        for start, inst in sched.instructions:
            replacement = inst
            if isinstance(inst, instructions.Play):
                try:
                    # Reuse the first equal pulse seen so far, if any.
                    shared = canonical_pulses[canonical_pulses.index(inst.pulse)]
                    replacement = instructions.Play(shared, inst.channel, inst.name)
                except ValueError:
                    # First occurrence: remember it as the canonical instance.
                    canonical_pulses.append(inst.pulse)
            rebuilt.insert(start, replacement, inplace=True)
        compressed.append(rebuilt)
    return compressed
def flatten(program: Schedule) -> Schedule:
    """Flatten (inline) any called nodes into a Schedule tree with no nested children.

    Args:
        program: Pulse program whose nested structure should be removed.

    Returns:
        The flattened pulse program.

    Raises:
        PulseError: When invalid data format is given.
    """
    # Guard clause: only plain Schedules can be flattened here.
    if not isinstance(program, Schedule):
        raise PulseError(f'Invalid input program {program.__class__.__name__} is specified.')
    return Schedule(*program.instructions, name=program.name, metadata=program.metadata)
def inline_subroutines(program: Union[Schedule, ScheduleBlock]) -> Union[Schedule, ScheduleBlock]:
    """Recursively expand every ``Call`` instruction into its subroutine's instructions.

    Parameter values stored in the call's parameter table are applied; the
    subroutine is copied before assignment to avoid mutating the original.

    Args:
        program: A program which may contain ``Call`` instructions.

    Returns:
        An equivalent program with no subroutines.

    Raises:
        PulseError: When input program is not valid data format.
    """
    if isinstance(program, ScheduleBlock):
        return _inline_block(program)
    if isinstance(program, Schedule):
        return _inline_schedule(program)
    raise PulseError(f'Invalid program {program.__class__.__name__} is specified.')
def _inline_schedule(schedule: Schedule) -> Schedule:
    """Inline the subroutines of a ``Schedule``.

    .. note:: A ``ScheduleBlock`` subroutine is first lowered to a ``Schedule``
        so that its instructions carry concrete ``t0`` values.
    """
    flat = Schedule(name=schedule.name,
                    metadata=schedule.metadata)
    for start, inst in schedule.instructions:
        if not isinstance(inst, instructions.Call):
            flat.insert(start, inst, inplace=True)
            continue
        # Apply bound parameters, lower blocks, then recurse into the result.
        target = inst.assigned_subroutine()
        if isinstance(target, ScheduleBlock):
            target = block_to_schedule(target)
        flat.insert(start, _inline_schedule(target), inplace=True)
    return flat
def _inline_block(block: ScheduleBlock) -> ScheduleBlock:
    """Inline the subroutines of a ``ScheduleBlock``.

    .. note:: A ``Schedule`` subroutine cannot be inlined here and raises.
    """
    inlined = ScheduleBlock(alignment_context=block.alignment_context,
                            name=block.name,
                            metadata=block.metadata)
    for element in block.instructions:
        if not isinstance(element, instructions.Call):
            inlined.append(element, inplace=True)
            continue
        # Apply bound parameters before recursing.
        subroutine = element.assigned_subroutine()
        if isinstance(subroutine, Schedule):
            raise PulseError(f'A subroutine {subroutine.name} is a pulse Schedule. '
                             'This program cannot be inserted into ScheduleBlock because '
                             't0 associated with instruction will be lost.')
        inlined.append(_inline_block(subroutine), inplace=True)
    return inlined
def remove_directives(schedule: Schedule) -> Schedule:
    """Strip every compiler directive from a schedule.

    Args:
        schedule: The schedule to filter.

    Returns:
        A schedule without directives.
    """
    directive_types = [directives.Directive]
    return schedule.exclude(instruction_types=directive_types)
def remove_trivial_barriers(schedule: Schedule) -> Schedule:
    """Drop barriers that span 0 or 1 channels (they constrain nothing).

    Args:
        schedule: A schedule to remove trivial barriers from.

    Returns:
        schedule: A schedule without trivial barriers
    """
    def _is_trivial_barrier(time_inst):
        # ``time_inst`` is a (t0, instruction) pair.
        inst = time_inst[1]
        return isinstance(inst, directives.RelativeBarrier) and len(inst.channels) < 2

    return schedule.exclude(_is_trivial_barrier)
def align_measures(schedules: Iterable[ScheduleComponent],
                   inst_map: Optional[InstructionScheduleMap] = None,
                   cal_gate: str = 'u3',
                   max_calibration_duration: Optional[int] = None,
                   align_time: Optional[int] = None,
                   align_all: Optional[bool] = True,
                   ) -> List[Schedule]:
    """Return new schedules where measurements occur at the same physical time.

    This transformation will align the first :class:`qiskit.pulse.Acquire` on
    every channel to occur at the same time.

    Minimum measurement wait time (to allow for calibration pulses) is enforced
    and may be set with ``max_calibration_duration``.

    By default only instructions containing a :class:`~qiskit.pulse.AcquireChannel`
    or :class:`~qiskit.pulse.MeasureChannel` will be shifted. If you wish to keep
    the relative timing of all instructions in the schedule set ``align_all=True``.

    This method assumes that ``MeasureChannel(i)`` and ``AcquireChannel(i)``
    correspond to the same qubit and the acquire/play instructions
    should be shifted together on these channels.

    .. jupyter-kernel:: python3
        :id: align_measures

    .. jupyter-execute::

        from qiskit import pulse
        from qiskit.pulse import transforms

        with pulse.build() as sched:
            with pulse.align_sequential():
                pulse.play(pulse.Constant(10, 0.5), pulse.DriveChannel(0))
                pulse.play(pulse.Constant(10, 1.), pulse.MeasureChannel(0))
                pulse.acquire(20, pulse.AcquireChannel(0), pulse.MemorySlot(0))

        sched_shifted = sched << 20

        aligned_sched, aligned_sched_shifted = transforms.align_measures([sched, sched_shifted])

        assert aligned_sched == aligned_sched_shifted

    If it is desired to only shift acquisition and measurement stimulus instructions
    set the flag ``align_all=False``:

    .. jupyter-execute::

        aligned_sched, aligned_sched_shifted = transforms.align_measures(
            [sched, sched_shifted],
            align_all=False,
        )

        assert aligned_sched != aligned_sched_shifted

    Args:
        schedules: Collection of schedules to be aligned together
        inst_map: Mapping of circuit operations to pulse schedules
        cal_gate: The name of the gate to inspect for the calibration time
        max_calibration_duration: If provided, inst_map and cal_gate will be ignored
        align_time: If provided, this will be used as final align time.
        align_all: Shift all instructions in the schedule such that they maintain
            their relative alignment with the shifted acquisition instruction.
            If ``False`` only the acquisition and measurement pulse instructions
            will be shifted.
    Returns:
        The input list of schedules transformed to have their measurements aligned.

    Raises:
        PulseError: If the provided alignment time is negative.
    """
    def get_first_acquire_times(schedules):
        """Return a list of first acquire times for each schedule."""
        acquire_times = []
        for schedule in schedules:
            visited_channels = set()
            # Missing channels map to None so membership checks below are safe.
            qubit_first_acquire_times = defaultdict(lambda: None)
            for time, inst in schedule.instructions:
                if (isinstance(inst, instructions.Acquire) and
                        inst.channel not in visited_channels):
                    visited_channels.add(inst.channel)
                    qubit_first_acquire_times[inst.channel.index] = time
            acquire_times.append(qubit_first_acquire_times)
        return acquire_times

    def get_max_calibration_duration(inst_map, cal_gate):
        """Return the time needed to allow for readout discrimination calibration pulses."""
        # TODO (qiskit-terra #5472): fix behavior of this.
        max_calibration_duration = 0
        for qubits in inst_map.qubits_with_instruction(cal_gate):
            cmd = inst_map.get(cal_gate, qubits, np.pi, 0, np.pi)
            max_calibration_duration = max(cmd.duration, max_calibration_duration)
        return max_calibration_duration

    if align_time is not None and align_time < 0:
        raise exceptions.PulseError("Align time cannot be negative.")

    first_acquire_times = get_first_acquire_times(schedules)
    # Extract the maximum acquire in every schedule across all acquires in the schedule.
    # If there are no acquires in the schedule default to 0.
    max_acquire_times = [max(0, *times.values()) for times in first_acquire_times]
    if align_time is None:
        if max_calibration_duration is None:
            if inst_map:
                max_calibration_duration = get_max_calibration_duration(inst_map, cal_gate)
            else:
                max_calibration_duration = 0
        # Align to the latest of calibration end and every schedule's first acquire.
        align_time = max(max_calibration_duration, *max_acquire_times)

    # Shift acquires according to the new scheduled time
    new_schedules = []
    for sched_idx, schedule in enumerate(schedules):
        new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
        stop_time = schedule.stop_time

        if align_all:
            # Whole-schedule shift: preserve relative timing of all instructions.
            if first_acquire_times[sched_idx]:
                shift = align_time - max_acquire_times[sched_idx]
            else:
                shift = align_time - stop_time
        else:
            # NOTE(review): with align_all=False, the shift computed for a
            # measurement instruction below is NOT reset afterwards, so it also
            # applies to any subsequent non-measurement instructions until the
            # next measurement recomputes it -- confirm this is intended.
            shift = 0

        for time, inst in schedule.instructions:
            measurement_channels = {
                chan.index for chan in inst.channels if
                isinstance(chan, (chans.MeasureChannel, chans.AcquireChannel))
            }
            if measurement_channels:
                sched_first_acquire_times = first_acquire_times[sched_idx]
                max_start_time = max(sched_first_acquire_times[chan]
                                     for chan in measurement_channels if
                                     chan in sched_first_acquire_times)
                shift = align_time - max_start_time

            if shift < 0:
                warnings.warn(
                    "The provided alignment time is scheduling an acquire instruction "
                    "earlier than it was scheduled for in the original Schedule. "
                    "This may result in an instruction being scheduled before t=0 and "
                    "an error being raised."
                )
            new_schedule.insert(time+shift, inst, inplace=True)

        new_schedules.append(new_schedule)

    return new_schedules
def add_implicit_acquires(schedule: ScheduleComponent,
                          meas_map: List[List[int]]
                          ) -> Schedule:
    """Return a new schedule with implicit acquires from the measurement mapping replaced by
    explicit ones.

    .. warning:: Since new acquires are being added, Memory Slots will be set to match the
                 qubit index. This may overwrite your specification.

    Args:
        schedule: Schedule to be aligned.
        meas_map: List of lists of qubits that are measured together.

    Returns:
        A ``Schedule`` with the additional acquisition instructions.
    """
    new_schedule = Schedule(name=schedule.name, metadata=schedule.metadata)
    # Maps a start time to the set of qubit indices already acquired at that
    # time, so the same explicit acquire is not inserted twice.
    acquire_map = dict()
    for time, inst in schedule.instructions:
        if isinstance(inst, instructions.Acquire):
            if inst.mem_slot and inst.mem_slot.index != inst.channel.index:
                warnings.warn("One of your acquires was mapped to a memory slot which didn't match"
                              " the qubit index. I'm relabeling them to match.")
            # Get the label of all qubits that are measured with the qubit(s) in this instruction
            all_qubits = []
            for sublist in meas_map:
                if inst.channel.index in sublist:
                    all_qubits.extend(sublist)
            # Replace the old acquire instruction by a new one explicitly acquiring all qubits in
            # the measurement group.
            for i in all_qubits:
                explicit_inst = instructions.Acquire(inst.duration,
                                                     chans.AcquireChannel(i),
                                                     mem_slot=chans.MemorySlot(i),
                                                     kernel=inst.kernel,
                                                     discriminator=inst.discriminator)
                # Fix: the previous code rebound ``acquire_map`` to a fresh dict
                # (``acquire_map = {time: {i}}``) whenever an unseen time appeared,
                # silently discarding the bookkeeping for all earlier times. That
                # only worked while instructions arrived sorted by time; accumulate
                # per-time sets instead so correctness no longer depends on ordering.
                acquired = acquire_map.setdefault(time, set())
                if i not in acquired:
                    new_schedule.insert(time, explicit_inst, inplace=True)
                    acquired.add(i)
        else:
            new_schedule.insert(time, inst, inplace=True)
    return new_schedule
|
gadial/qiskit-terra | qiskit/pulse/instructions/delay.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An instruction for blocking time on a channel; useful for scheduling alignment."""
from typing import Optional, Union, Tuple
from qiskit.circuit import ParameterExpression
from qiskit.pulse.channels import Channel
from qiskit.pulse.instructions.instruction import Instruction
class Delay(Instruction):
    """A timed no-op used for aligning and scheduling other instructions.

    A ``Delay`` blocks its channel for ``duration`` samples and produces no
    output signal.

    Example:
        To schedule an instruction at time = 10, on a channel assigned to the variable ``channel``,
        the following could be used::

            sched = Schedule(name="Delay instruction example")
            sched += Delay(10, channel)
            sched += Gaussian(duration, amp, sigma, channel)

        The ``channel`` will output no signal from time=0 up until time=10.
    """

    def __init__(self, duration: Union[int, ParameterExpression],
                 channel: Channel,
                 name: Optional[str] = None):
        """Create a new delay instruction.

        No other instruction may be scheduled within a ``Delay``.

        Args:
            duration: Length of time of the delay in terms of dt.
            channel: The channel that will have the delay.
            name: Name of the delay for display purposes.
        """
        super().__init__(operands=(duration, channel), name=name)

    @property
    def duration(self) -> Union[int, ParameterExpression]:
        """Duration of this instruction."""
        return self.operands[0]

    @property
    def channel(self) -> Channel:
        """Return the :py:class:`~qiskit.pulse.channels.Channel` that this instruction is
        scheduled on.
        """
        return self.operands[1]

    @property
    def channels(self) -> Tuple[Channel]:
        """Returns the channels that this schedule uses."""
        return (self.channel, )

    def is_parameterized(self) -> bool:
        """Return ``True`` iff the instruction is parameterized."""
        if isinstance(self.duration, ParameterExpression):
            return True
        return super().is_parameterized()
|
gadial/qiskit-terra | qiskit/quantum_info/operators/channel/kraus.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Kraus representation of a Quantum Channel.
"""
import copy
from numbers import Number
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.instruction import Instruction
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_identity_matrix
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.op_shape import OpShape
from qiskit.quantum_info.operators.channel.choi import Choi
from qiskit.quantum_info.operators.channel.superop import SuperOp
from qiskit.quantum_info.operators.channel.transformations import _to_kraus
from qiskit.quantum_info.operators.mixins import generate_apidocs
class Kraus(QuantumChannel):
    r"""Kraus representation of a quantum channel.

    For a quantum channel :math:`\mathcal{E}`, the Kraus representation is
    given by a set of matrices :math:`[A_0,...,A_{K-1}]` such that the
    evolution of a :class:`~qiskit.quantum_info.DensityMatrix`
    :math:`\rho` is given by

    .. math::

        \mathcal{E}(\rho) = \sum_{i=0}^{K-1} A_i \rho A_i^\dagger

    A general operator map :math:`\mathcal{G}` can also be written using the
    generalized Kraus representation which is given by two sets of matrices
    :math:`[A_0,...,A_{K-1}]`, :math:`[B_0,...,B_{K-1}]` such that

    .. math::

        \mathcal{G}(\rho) = \sum_{i=0}^{K-1} A_i \rho B_i^\dagger

    See reference [1] for further details.

    References:
        1. C.J. Wood, J.D. Biamonte, D.G. Cory, *Tensor networks and graphical calculus
           for open quantum systems*, Quant. Inf. Comp. 15, 0579-0811 (2015).
           `arXiv:1111.6950 [quant-ph] <https://arxiv.org/abs/1111.6950>`_
    """

    def __init__(self, data, input_dims=None, output_dims=None):
        """Initialize a quantum channel Kraus operator.

        Args:
            data (QuantumCircuit or
                  Instruction or
                  BaseOperator or
                  matrix): data to initialize superoperator.
            input_dims (tuple): the input subsystem dimensions.
                                [Default: None]
            output_dims (tuple): the output subsystem dimensions.
                                 [Default: None]

        Raises:
            QiskitError: if input data cannot be initialized as a
                         a list of Kraus matrices.

        Additional Information:
            If the input or output dimensions are None, they will be
            automatically determined from the input data. If the input data is
            a list of Numpy arrays of shape (2**N, 2**N) qubit systems will be
            used. If the input does not correspond to an N-qubit channel, it
            will assign a single subsystem with dimension specified by the
            shape of the input.
        """
        # If the input is a list or tuple we assume it is a list of Kraus
        # matrices, if it is a numpy array we assume that it is a single Kraus
        # operator
        if isinstance(data, (list, tuple, np.ndarray)):
            # Check if it is a single unitary matrix A for channel:
            # E(rho) = A * rho * A^\dagger
            if isinstance(data, np.ndarray) or np.array(data).ndim == 2:
                # Convert single Kraus op to general Kraus pair
                kraus = ([np.asarray(data, dtype=complex)], None)
                shape = kraus[0][0].shape
            # Check if single Kraus set [A_i] for channel:
            # E(rho) = sum_i A_i * rho * A_i^dagger
            elif isinstance(data, list) and len(data) > 0:
                # Get dimensions from first Kraus op
                kraus = [np.asarray(data[0], dtype=complex)]
                shape = kraus[0].shape
                # Iterate over remaining ops and check they are same shape
                for i in data[1:]:
                    op = np.asarray(i, dtype=complex)
                    if op.shape != shape:
                        raise QiskitError(
                            "Kraus operators are different dimensions.")
                    kraus.append(op)
                # Convert single Kraus set to general Kraus pair
                kraus = (kraus, None)
            # Check if generalized Kraus set ([A_i], [B_i]) for channel:
            # E(rho) = sum_i A_i * rho * B_i^dagger
            elif isinstance(data,
                            tuple) and len(data) == 2 and len(data[0]) > 0:
                kraus_left = [np.asarray(data[0][0], dtype=complex)]
                shape = kraus_left[0].shape
                for i in data[0][1:]:
                    op = np.asarray(i, dtype=complex)
                    if op.shape != shape:
                        raise QiskitError(
                            "Kraus operators are different dimensions.")
                    kraus_left.append(op)
                if data[1] is None:
                    # Right set omitted: this is a standard (CP) Kraus map.
                    kraus = (kraus_left, None)
                else:
                    kraus_right = []
                    for i in data[1]:
                        op = np.asarray(i, dtype=complex)
                        if op.shape != shape:
                            raise QiskitError(
                                "Kraus operators are different dimensions.")
                        kraus_right.append(op)
                    kraus = (kraus_left, kraus_right)
            else:
                raise QiskitError("Invalid input for Kraus channel.")
            op_shape = OpShape.auto(dims_l=output_dims, dims_r=input_dims,
                                    shape=kraus[0][0].shape)
        else:
            # Otherwise we initialize by conversion from another Qiskit
            # object into the QuantumChannel.
            if isinstance(data, (QuantumCircuit, Instruction)):
                # If the input is a Terra QuantumCircuit or Instruction we
                # convert it to a SuperOp
                data = SuperOp._init_instruction(data)
            else:
                # We use the QuantumChannel init transform to initialize
                # other objects into a QuantumChannel or Operator object.
                data = self._init_transformer(data)
            op_shape = data._op_shape
            output_dim, input_dim = op_shape.shape
            # Now that the input is an operator we convert it to a Kraus
            rep = getattr(data, '_channel_rep', 'Operator')
            kraus = _to_kraus(rep, data._data, input_dim, output_dim)
            # Initialize either single or general Kraus
            if kraus[1] is None or np.allclose(kraus[0], kraus[1]):
                # Standard Kraus map
                data = (kraus[0], None)
            else:
                # General (non-CPTP) Kraus map
                data = kraus
        super().__init__(data, op_shape=op_shape)

    @property
    def data(self):
        """Return list of Kraus matrices for channel."""
        if self._data[1] is None:
            # If only a single Kraus set, don't return the tuple
            # Just the first set
            return self._data[0]
        else:
            # Otherwise return the tuple of both kraus sets
            return self._data

    def is_cptp(self, atol=None, rtol=None):
        """Return True if completely-positive trace-preserving."""
        # A generalized (two-set) Kraus map is never CPTP.
        if self._data[1] is not None:
            return False
        if atol is None:
            atol = self.atol
        if rtol is None:
            rtol = self.rtol
        # CPTP iff sum_i A_i^dagger A_i == identity.
        accum = 0j
        for op in self._data[0]:
            accum += np.dot(np.transpose(np.conj(op)), op)
        return is_identity_matrix(accum, rtol=rtol, atol=atol)

    def _evolve(self, state, qargs=None):
        # Evolution is delegated to the SuperOp representation.
        return SuperOp(self)._evolve(state, qargs)

    # ---------------------------------------------------------------------
    # BaseOperator methods
    # ---------------------------------------------------------------------

    def conjugate(self):
        # Element-wise complex conjugation of every Kraus matrix.
        ret = copy.copy(self)
        kraus_l, kraus_r = self._data
        kraus_l = [np.conj(k) for k in kraus_l]
        if kraus_r is not None:
            kraus_r = [k.conj() for k in kraus_r]
        ret._data = (kraus_l, kraus_r)
        return ret

    def transpose(self):
        # Transpose every Kraus matrix; input/output dims are swapped.
        ret = copy.copy(self)
        ret._op_shape = self._op_shape.transpose()
        kraus_l, kraus_r = self._data
        kraus_l = [np.transpose(k) for k in kraus_l]
        if kraus_r is not None:
            kraus_r = [np.transpose(k) for k in kraus_r]
        ret._data = (kraus_l, kraus_r)
        return ret

    def adjoint(self):
        # Conjugate-transpose every Kraus matrix; input/output dims are swapped.
        ret = copy.copy(self)
        ret._op_shape = self._op_shape.transpose()
        kraus_l, kraus_r = self._data
        kraus_l = [np.conj(np.transpose(k)) for k in kraus_l]
        if kraus_r is not None:
            kraus_r = [np.conj(np.transpose(k)) for k in kraus_r]
        ret._data = (kraus_l, kraus_r)
        return ret

    def compose(self, other, qargs=None, front=False):
        if qargs is None:
            qargs = getattr(other, 'qargs', None)
        if qargs is not None:
            # Subsystem composition is performed in the SuperOp representation.
            return Kraus(
                SuperOp(self).compose(other, qargs=qargs, front=front))
        if not isinstance(other, Kraus):
            other = Kraus(other)
        new_shape = self._op_shape.compose(other._op_shape, qargs, front)
        input_dims = new_shape.dims_r()
        output_dims = new_shape.dims_l()
        # Composition is the pairwise matrix product of the two Kraus sets;
        # ``front`` selects which operand is applied first.
        if front:
            ka_l, ka_r = self._data
            kb_l, kb_r = other._data
        else:
            ka_l, ka_r = other._data
            kb_l, kb_r = self._data
        kab_l = [np.dot(a, b) for a in ka_l for b in kb_l]
        # A ``None`` right set means it equals the left set; only build the
        # combined right set when at least one operand is generalized.
        if ka_r is None and kb_r is None:
            kab_r = None
        elif ka_r is None:
            kab_r = [np.dot(a, b) for a in ka_l for b in kb_r]
        elif kb_r is None:
            kab_r = [np.dot(a, b) for a in ka_r for b in kb_l]
        else:
            kab_r = [np.dot(a, b) for a in ka_r for b in kb_r]
        ret = Kraus((kab_l, kab_r), input_dims, output_dims)
        ret._op_shape = new_shape
        return ret

    def tensor(self, other):
        # self ⊗ other
        if not isinstance(other, Kraus):
            other = Kraus(other)
        return self._tensor(self, other)

    def expand(self, other):
        # other ⊗ self
        if not isinstance(other, Kraus):
            other = Kraus(other)
        return self._tensor(other, self)

    @classmethod
    def _tensor(cls, a, b):
        # Tensor product of two Kraus channels: Kronecker product of every
        # pair of Kraus matrices.
        ret = copy.copy(a)
        ret._op_shape = a._op_shape.tensor(b._op_shape)
        # Get tensor matrix
        ka_l, ka_r = a._data
        kb_l, kb_r = b._data
        kab_l = [np.kron(ka, kb) for ka in ka_l for kb in kb_l]
        if ka_r is None and kb_r is None:
            kab_r = None
        else:
            # Expand the implicit (None) right sets before combining.
            if ka_r is None:
                ka_r = ka_l
            if kb_r is None:
                kb_r = kb_l
            # NOTE(review): the comprehension variables shadow the parameters
            # ``a``/``b``; harmless here since the parameters are not used
            # afterwards, but worth renaming.
            kab_r = [np.kron(a, b) for a in ka_r for b in kb_r]
        ret._data = (kab_l, kab_r)
        return ret

    def __add__(self, other):
        # Addition is not closed in the Kraus representation; use Choi.
        qargs = getattr(other, 'qargs', None)
        if not isinstance(other, QuantumChannel):
            other = Choi(other)
        return self._add(other, qargs=qargs)

    def __sub__(self, other):
        # Subtraction is not closed in the Kraus representation; use Choi.
        qargs = getattr(other, 'qargs', None)
        if not isinstance(other, QuantumChannel):
            other = Choi(other)
        return self._add(-other, qargs=qargs)

    def _add(self, other, qargs=None):
        # Since we cannot directly add two channels in the Kraus
        # representation we try and use the other channels method
        # or convert to the Choi representation
        return Kraus(Choi(self)._add(other, qargs=qargs))

    def _multiply(self, other):
        if not isinstance(other, Number):
            raise QiskitError("other is not a number")
        ret = copy.copy(self)
        # If the number is complex we need to convert to general
        # kraus channel so we multiply via Choi representation
        if isinstance(other, complex) or other < 0:
            # Convert to Choi-matrix
            ret._data = Kraus(Choi(self)._multiply(other))._data
            return ret
        # If the number is real we can update the Kraus operators
        # directly
        # Scaling the channel by c scales each Kraus matrix by sqrt(c).
        val = np.sqrt(other)
        kraus_r = None
        kraus_l = [val * k for k in self._data[0]]
        if self._data[1] is not None:
            kraus_r = [val * k for k in self._data[1]]
        ret._data = (kraus_l, kraus_r)
        return ret
# Update docstrings for API docs: fills in the docstrings of mixin-provided
# methods on the Kraus class at import time.
generate_apidocs(Kraus)
|
gadial/qiskit-terra | qiskit/pulse/library/discrete.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-return-doc, invalid-name
"""Module for builtin discrete pulses.
Note the sampling strategy use for all discrete pulses is ``midpoint``.
"""
from typing import Optional
from ..exceptions import PulseError
from .waveform import Waveform
from . import continuous
from . import samplers
# Midpoint-sampled version of the continuous constant function.
_sampled_constant_pulse = samplers.midpoint(continuous.constant)


def constant(duration: int, amp: complex, name: Optional[str] = None) -> Waveform:
    r"""Generates constant-sampled :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, samples from the function:

    .. math::

        f(x) = A

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Complex pulse amplitude.
        name: Name of pulse.

    Returns:
        The sampled constant pulse as a :class:`~qiskit.pulse.Waveform`.
    """
    return _sampled_constant_pulse(duration, amp, name=name)
# Midpoint-sampled version of the continuous zero function.
_sampled_zero_pulse = samplers.midpoint(continuous.zero)


def zero(duration: int, name: Optional[str] = None) -> Waveform:
    """Generates zero-sampled :class:`~qiskit.pulse.Waveform`.

    Samples from the function:

    .. math::

        f(x) = 0

    Args:
        duration: Duration of pulse. Must be greater than zero.
        name: Name of pulse.

    Returns:
        The sampled all-zero pulse as a :class:`~qiskit.pulse.Waveform`.
    """
    return _sampled_zero_pulse(duration, name=name)
# Midpoint-sampled version of the continuous square function.
_sampled_square_pulse = samplers.midpoint(continuous.square)


def square(duration: int, amp: complex, freq: Optional[float] = None,
           phase: float = 0, name: Optional[str] = None) -> Waveform:
    r"""Generates square wave :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`T=` ``period``, and :math:`\phi=` ``phase``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = A \text{sign}\left[ \sin\left(\frac{2 \pi x}{T} + 2\phi\right) \right]

    with the convention :math:`\text{sign}(0) = 1`.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude. Wave range is :math:`[-` ``amp`` :math:`,` ``amp`` :math:`]`.
        freq: Pulse frequency, units of 1./dt. If ``None`` defaults to 1./duration.
        phase: Pulse phase.
        name: Name of pulse.

    Returns:
        The sampled square wave as a :class:`~qiskit.pulse.Waveform`.
    """
    if freq is None:
        # Default to a single full period over the pulse duration.
        freq = 1./duration

    return _sampled_square_pulse(duration, amp, freq, phase=phase, name=name)
# Midpoint-sampled version of the continuous sawtooth function.
_sampled_sawtooth_pulse = samplers.midpoint(continuous.sawtooth)


def sawtooth(duration: int, amp: complex, freq: Optional[float] = None,
             phase: float = 0, name: Optional[str] = None) -> Waveform:
    r"""Generates sawtooth wave :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`T=` ``period``, and :math:`\phi=` ``phase``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = 2 A \left( g(x) - \left\lfloor \frac{1}{2} + g(x) \right\rfloor\right)

    where :math:`g(x) = x/T + \phi/\pi`.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude. Wave range is :math:`[-` ``amp`` :math:`,` ``amp`` :math:`]`.
        freq: Pulse frequency, units of 1./dt. If ``None`` defaults to 1./duration.
        phase: Pulse phase.
        name: Name of pulse.

    Returns:
        The sampled sawtooth wave as a :class:`~qiskit.pulse.Waveform`.

    Example:
        .. jupyter-execute::

            import matplotlib.pyplot as plt
            from qiskit.pulse.library import sawtooth
            import numpy as np

            duration = 100
            amp = 1
            freq = 1 / duration
            sawtooth_wave = np.real(sawtooth(duration, amp, freq).samples)
            plt.plot(range(duration), sawtooth_wave)
    """
    if freq is None:
        # Default to a single full period over the pulse duration.
        freq = 1./duration

    return _sampled_sawtooth_pulse(duration, amp, freq, phase=phase, name=name)
# Midpoint-sampled version of the continuous triangle function.
_sampled_triangle_pulse = samplers.midpoint(continuous.triangle)


def triangle(duration: int, amp: complex, freq: Optional[float] = None,
             phase: float = 0, name: Optional[str] = None) -> Waveform:
    r"""Generates triangle wave :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`T=` ``period``, and :math:`\phi=` ``phase``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = A \left(-2\left|\text{sawtooth}(x, A, T, \phi)\right| + 1\right)

    This a non-sinusoidal wave with linear ramping.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude. Wave range is :math:`[-` ``amp`` :math:`,` ``amp`` :math:`]`.
        freq: Pulse frequency, units of 1./dt. If ``None`` defaults to 1./duration.
        phase: Pulse phase.
        name: Name of pulse.

    Returns:
        The sampled triangle wave as a :class:`~qiskit.pulse.Waveform`.

    Example:
        .. jupyter-execute::

            import matplotlib.pyplot as plt
            from qiskit.pulse.library import triangle
            import numpy as np

            duration = 100
            amp = 1
            freq = 1 / duration
            triangle_wave = np.real(triangle(duration, amp, freq).samples)
            plt.plot(range(duration), triangle_wave)
    """
    if freq is None:
        # Default to a single full period over the pulse duration.
        freq = 1./duration

    return _sampled_triangle_pulse(duration, amp, freq, phase=phase, name=name)
# Midpoint-sampled version of the continuous cosine function.
_sampled_cos_pulse = samplers.midpoint(continuous.cos)


def cos(duration: int, amp: complex, freq: Optional[float] = None,
        phase: float = 0, name: Optional[str] = None) -> Waveform:
    r"""Generates cosine wave :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`\omega=` ``freq``, and :math:`\phi=` ``phase``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = A \cos(2 \pi \omega x + \phi)

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude.
        freq: Pulse frequency, units of 1/dt. If ``None`` defaults to single cycle.
        phase: Pulse phase.
        name: Name of pulse.

    Returns:
        The sampled cosine wave as a :class:`~qiskit.pulse.Waveform`.
    """
    if freq is None:
        # Default to a single full cycle over the pulse duration.
        freq = 1/duration

    return _sampled_cos_pulse(duration, amp, freq, phase=phase, name=name)
# Midpoint-sampled version of the continuous sine function.
_sampled_sin_pulse = samplers.midpoint(continuous.sin)


def sin(duration: int, amp: complex, freq: Optional[float] = None,
        phase: float = 0, name: Optional[str] = None) -> Waveform:
    r"""Generates sine wave :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`\omega=` ``freq``, and :math:`\phi=` ``phase``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = A \sin(2 \pi \omega x + \phi)

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude.
        freq: Pulse frequency, units of 1/dt. If ``None`` defaults to single cycle.
        phase: Pulse phase.
        name: Name of pulse.

    Returns:
        The sampled sine wave as a :class:`~qiskit.pulse.Waveform`.
    """
    if freq is None:
        # Default to a single full cycle over the pulse duration.
        freq = 1/duration

    return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name)
_sampled_gaussian_pulse = samplers.midpoint(continuous.gaussian)


def gaussian(duration: int, amp: complex, sigma: float, name: Optional[str] = None,
             zero_ends: bool = True) -> Waveform:
    r"""Generates unnormalized gaussian :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp`` and :math:`\sigma=` ``sigma``, applies the ``midpoint``
    sampling strategy to generate a discrete pulse sampled from the continuous function:

    .. math::

        f(x) = A\exp\left(-\frac{1}{2}\left(\frac{x - \mu}{\sigma}\right)^2\right),

    with the center :math:`\mu=` ``duration/2``.  (This form is the one consistent
    with the integrated area stated below.)

    If ``zero_ends==True``, each output sample :math:`y` is modified according to:

    .. math::

        y \mapsto A\frac{y-y^*}{A-y^*},

    where :math:`y^*` is the value of the endpoint samples. This sets the endpoints
    to :math:`0` while preserving the amplitude at the center. If :math:`A=y^*`,
    :math:`y` is set to :math:`1`. By default, the endpoints are at ``x = -1, x = duration + 1``.

    Integrated area under the full curve is ``amp * np.sqrt(2*np.pi*sigma**2)``

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude at ``duration/2``.
        sigma: Width (standard deviation) of pulse.
        name: Name of pulse.
        zero_ends: If True, zero ends at ``x = -1, x = duration + 1``, but rescale to preserve amp.
    """
    center = duration/2
    # Widening the zeroed window by one sample on each side puts the forced
    # zero-crossings just outside the sampled range [0, duration).
    zeroed_width = duration + 2 if zero_ends else None
    rescale_amp = bool(zero_ends)
    return _sampled_gaussian_pulse(duration, amp, center, sigma,
                                   zeroed_width=zeroed_width, rescale_amp=rescale_amp,
                                   name=name)
_sampled_gaussian_deriv_pulse = samplers.midpoint(continuous.gaussian_deriv)


def gaussian_deriv(duration: int, amp: complex, sigma: float,
                   name: Optional[str] = None) -> Waveform:
    r"""Generates unnormalized gaussian derivative :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp`` and :math:`\sigma=` ``sigma`` applies the `midpoint`
    sampling strategy to generate a discrete pulse sampled from the continuous
    function

    .. math::

        f(x) = -A\frac{(x - \mu)}{\sigma^2}
               \exp\left(-\frac{1}{2}\left(\frac{x - \mu}{\sigma}\right)^2\right),

    i.e. the derivative of the Gaussian function, with center :math:`\mu=` ``duration/2``.
    (The overall sign follows from differentiating the Gaussian form —
    NOTE(review): confirm against ``continuous.gaussian_deriv``.)

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude of corresponding Gaussian at the pulse center (``duration/2``).
        sigma: Width (standard deviation) of pulse.
        name: Name of pulse.
    """
    center = duration/2
    return _sampled_gaussian_deriv_pulse(duration, amp, center, sigma, name=name)
_sampled_sech_pulse = samplers.midpoint(continuous.sech)


def sech(duration: int, amp: complex, sigma: float, name: Optional[str] = None,
         zero_ends: bool = True) -> Waveform:
    r"""Generates unnormalized sech :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp`` and :math:`\sigma=` ``sigma``, applies the ``midpoint``
    sampling strategy to generate a discrete pulse sampled from the continuous function:

    .. math::

        f(x) = A\text{sech}\left(\frac{x-\mu}{\sigma} \right)

    with the center :math:`\mu=` ``duration/2``.

    If ``zero_ends==True``, each output sample :math:`y` is modified according to:

    .. math::

        y \mapsto A\frac{y-y^*}{A-y^*},

    where :math:`y^*` is the value of the endpoint samples. This sets the endpoints
    to :math:`0` while preserving the amplitude at the center. If :math:`A=y^*`,
    :math:`y` is set to :math:`1`. By default, the endpoints are at ``x = -1, x = duration + 1``.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude at `duration/2`.
        sigma: Width (standard deviation) of pulse.
        name: Name of pulse.
        zero_ends: If True, zero ends at ``x = -1, x = duration + 1``, but rescale to preserve amp.
    """
    center = duration/2
    # One extra sample of margin on each side so the zeroed endpoints fall
    # just outside the sampled range.
    zeroed_width = duration + 2 if zero_ends else None
    rescale_amp = bool(zero_ends)
    return _sampled_sech_pulse(duration, amp, center, sigma,
                               zeroed_width=zeroed_width, rescale_amp=rescale_amp,
                               name=name)
_sampled_sech_deriv_pulse = samplers.midpoint(continuous.sech_deriv)


def sech_deriv(duration: int, amp: complex, sigma: float, name: Optional[str] = None) -> Waveform:
    r"""Generates unnormalized sech derivative :class:`~qiskit.pulse.Waveform`.

    For :math:`A=` ``amp``, :math:`\sigma=` ``sigma``, and center :math:`\mu=` ``duration/2``,
    applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
    the continuous function:

    .. math::

        f(x) = \frac{d}{dx}\left[A\text{sech}\left(\frac{x-\mu}{\sigma} \right)\right],

    i.e. the derivative of :math:`\text{sech}`.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude at `center`.
        sigma: Width (standard deviation) of pulse.
        name: Name of pulse.
    """
    center = duration/2
    return _sampled_sech_deriv_pulse(duration, amp, center, sigma, name=name)
_sampled_gaussian_square_pulse = samplers.midpoint(continuous.gaussian_square)


def gaussian_square(duration: int, amp: complex, sigma: float,
                    risefall: Optional[float] = None, width: Optional[float] = None,
                    name: Optional[str] = None, zero_ends: bool = True) -> Waveform:
    r"""Generates gaussian square :class:`~qiskit.pulse.Waveform`.

    For :math:`d=` ``duration``, :math:`A=` ``amp``, :math:`\sigma=` ``sigma``,
    and :math:`r=` ``risefall``, applies the ``midpoint`` sampling strategy to
    generate a discrete pulse sampled from the continuous function:

    .. math::

        f(x) = \begin{cases}
                    g(x - r) & x\leq r \\
                    A & r\leq x\leq d-r \\
                    g(x - (d - r)) & d-r\leq x
                \end{cases}

    where :math:`g(x)` is the Gaussian function sampled from in :meth:`gaussian`
    with :math:`A=` ``amp``, :math:`\mu=1`, and :math:`\sigma=` ``sigma``. I.e.
    :math:`f(x)` represents a square pulse with smooth Gaussian edges.

    If ``zero_ends == True``, the samples for the Gaussian ramps are remapped as in
    :meth:`gaussian`.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude.
        sigma: Width (standard deviation) of Gaussian rise/fall portion of the pulse.
        risefall: Number of samples over which pulse rise and fall happen. Width of
            square portion of pulse will be ``duration-2*risefall``.
        width: The duration of the embedded square pulse. Only one of ``width`` or ``risefall``
            should be specified as the functional form requires
            ``width = duration - 2 * risefall``.
        name: Name of pulse.
        zero_ends: If True, zero ends at ``x = -1, x = duration + 1``, but rescale to preserve amp.

    Raises:
        PulseError: If ``risefall`` and ``width`` arguments are inconsistent or not enough info.
    """
    # Exactly one of (risefall, width) is required; if both are given they
    # must satisfy 2 * risefall + width == duration.
    if risefall is None and width is None:
        raise PulseError("gaussian_square missing required argument: 'width' or 'risefall'.")
    if risefall is not None:
        if width is None:
            # Derive the plateau width from the requested ramp length.
            width = duration - 2 * risefall
        elif 2 * risefall + width != duration:
            raise PulseError("Both width and risefall were specified, and they are "
                             "inconsistent: 2 * risefall + width == {} != "
                             "duration == {}.".format(2 * risefall + width, duration))
    center = duration / 2
    zeroed_width = duration + 2 if zero_ends else None
    # NOTE(review): unlike gaussian()/sech(), no rescale_amp flag is forwarded
    # here -- confirm that the sampler's default matches the zero_ends intent.
    return _sampled_gaussian_square_pulse(duration, amp, center, width, sigma,
                                          zeroed_width=zeroed_width, name=name)
_sampled_drag_pulse = samplers.midpoint(continuous.drag)


def drag(duration: int, amp: complex, sigma: float, beta: float,
         name: Optional[str] = None, zero_ends: bool = True) -> Waveform:
    r"""Generates Y-only correction DRAG :class:`~qiskit.pulse.Waveform` for a standard
    nonlinear oscillator (SNO) [1].

    Samples, via the ``midpoint`` strategy, the continuous function

    .. math::

        f(x) = g(x) + i \beta h(x),

    where :math:`g(x)` is the Gaussian sampled in :meth:`gaussian` and :math:`h(x)`
    the Gaussian derivative sampled in :meth:`gaussian_deriv`, with
    :math:`A=` ``amp``, :math:`\sigma=` ``sigma``, and :math:`\beta=` ``beta``.
    When ``zero_ends == True`` the Gaussian samples are remapped exactly as in
    :meth:`gaussian`.

    References:
        1. "Analytic control methods for high-fidelity unitary operations
           in a weakly nonlinear oscillator." Phys. Rev. A 83, 012308 (2011).
           http://dx.doi.org/10.1103/PhysRevA.83.012308

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude at the center ``duration/2``.
        sigma: Width (standard deviation) of the pulse.
        beta: Y correction amplitude. For the SNO this is
            :math:`\beta=-\frac{\lambda_1^2}{4\Delta_2}`, where :math:`\lambda_1` is the
            relative coupling strength between the first and second excited states
            and :math:`\Delta_2` is the detuning between the respective excited states.
        name: Name of pulse.
        zero_ends: If True, zero ends at ``x = -1, x = duration + 1``, rescaling to
            preserve ``amp``.
    """
    pulse_center = duration / 2
    # One extra sample of margin on each side so the forced zeros sit just
    # outside the sampled range.
    margin_width = (duration + 2) if zero_ends else None
    return _sampled_drag_pulse(duration, amp, pulse_center, sigma, beta,
                               zeroed_width=margin_width,
                               rescale_amp=bool(zero_ends),
                               name=name)
|
gadial/qiskit-terra | qiskit/quantum_info/operators/channel/transformations.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=too-many-return-statements,unpacking-non-sequence
"""
Transformations between QuantumChannel representations.
"""
import numpy as np
import scipy.linalg as la
from qiskit.exceptions import QiskitError
from qiskit.quantum_info.operators.predicates import is_hermitian_matrix
from qiskit.quantum_info.operators.predicates import ATOL_DEFAULT
def _transform_rep(input_rep, output_rep, data, input_dim, output_dim):
    """Transform a QuantumChannel between representations.

    Dispatches on ``output_rep`` to the matching ``_to_*`` converter.
    """
    if input_rep == output_rep:
        return data
    converters = {
        'Choi': _to_choi,
        'Operator': _to_operator,
        'SuperOp': _to_superop,
        'Kraus': _to_kraus,
        'Chi': _to_chi,
        'PTM': _to_ptm,
        'Stinespring': _to_stinespring,
    }
    convert = converters.get(output_rep)
    if convert is None:
        raise QiskitError('Invalid QuantumChannel {}'.format(output_rep))
    return convert(input_rep, data, input_dim, output_dim)
def _to_choi(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a Choi matrix."""
    if rep == 'Choi':
        return data
    if rep == 'Operator':
        return _from_operator('Choi', data, input_dim, output_dim)
    if rep == 'Kraus':
        return _kraus_to_choi(data)
    if rep == 'Chi':
        return _chi_to_choi(data, input_dim)
    if rep == 'PTM':
        # PTM goes through the superoperator representation first.
        superop = _ptm_to_superop(data, input_dim)
        return _superop_to_choi(superop, input_dim, output_dim)
    if rep == 'SuperOp':
        return _superop_to_choi(data, input_dim, output_dim)
    if rep == 'Stinespring':
        return _stinespring_to_choi(data, input_dim, output_dim)
    raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _to_superop(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a SuperOp matrix."""
    if rep == 'SuperOp':
        return data
    if rep == 'Operator':
        return _from_operator('SuperOp', data, input_dim, output_dim)
    if rep == 'Kraus':
        return _kraus_to_superop(data)
    if rep == 'PTM':
        return _ptm_to_superop(data, input_dim)
    if rep == 'Stinespring':
        return _stinespring_to_superop(data, input_dim, output_dim)
    if rep in ('Choi', 'Chi'):
        # Chi first becomes Choi; both then reshuffle to SuperOp.
        choi = data if rep == 'Choi' else _chi_to_choi(data, input_dim)
        return _choi_to_superop(choi, input_dim, output_dim)
    raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _to_kraus(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a Kraus pair."""
    if rep == 'Kraus':
        return data
    if rep == 'Stinespring':
        return _stinespring_to_kraus(data, output_dim)
    if rep == 'Operator':
        return _from_operator('Kraus', data, input_dim, output_dim)
    # Every remaining representation converts via the Choi matrix.
    choi = data if rep == 'Choi' else _to_choi(rep, data, input_dim, output_dim)
    return _choi_to_kraus(choi, input_dim, output_dim)
def _to_chi(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a Chi matrix."""
    if rep == 'Chi':
        return data
    # Chi is only defined for n-qubit channels.
    _check_nqubit_dim(input_dim, output_dim)
    if rep == 'Operator':
        return _from_operator('Chi', data, input_dim, output_dim)
    # Every remaining representation converts via the Choi matrix.
    choi = data if rep == 'Choi' else _to_choi(rep, data, input_dim, output_dim)
    return _choi_to_chi(choi, input_dim)
def _to_ptm(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a PTM matrix."""
    if rep == 'PTM':
        return data
    # PTM is only defined for n-qubit channels.
    _check_nqubit_dim(input_dim, output_dim)
    if rep == 'Operator':
        return _from_operator('PTM', data, input_dim, output_dim)
    # Every remaining representation converts via the superoperator.
    superop = data if rep == 'SuperOp' else _to_superop(rep, data, input_dim, output_dim)
    return _superop_to_ptm(superop, input_dim)
def _to_stinespring(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to a Stinespring pair."""
    if rep == 'Stinespring':
        return data
    if rep == 'Operator':
        return _from_operator('Stinespring', data, input_dim, output_dim)
    # Every remaining representation converts via the Kraus representation.
    kraus = data if rep == 'Kraus' else _to_kraus(rep, data, input_dim, output_dim)
    return _kraus_to_stinespring(kraus, input_dim, output_dim)
def _to_operator(rep, data, input_dim, output_dim):
    """Convert channel data in representation ``rep`` to an Operator matrix."""
    if rep == 'Operator':
        return data
    if rep == 'Stinespring':
        return _stinespring_to_operator(data, output_dim)
    # Every remaining representation converts via the Kraus representation.
    kraus = data if rep == 'Kraus' else _to_kraus(rep, data, input_dim, output_dim)
    return _kraus_to_operator(kraus)
def _from_operator(rep, data, input_dim, output_dim):
    """Convert an Operator matrix to the representation named by ``rep``."""
    if rep == 'Operator':
        return data
    if rep == 'SuperOp':
        # S = conj(U) (x) U for a unitary-like operator U.
        return np.kron(np.conj(data), data)
    if rep == 'Choi':
        # Choi = |vec(U)><vec(U)| with column-stacking vectorization.
        col_vec = np.ravel(data, order='F')
        return np.outer(col_vec, np.conj(col_vec))
    if rep == 'Kraus':
        return [data], None
    if rep == 'Stinespring':
        return data, None
    if rep == 'Chi':
        _check_nqubit_dim(input_dim, output_dim)
        choi = _from_operator('Choi', data, input_dim, output_dim)
        return _choi_to_chi(choi, input_dim)
    if rep == 'PTM':
        _check_nqubit_dim(input_dim, output_dim)
        superop = _from_operator('SuperOp', data, input_dim, output_dim)
        return _superop_to_ptm(superop, input_dim)
    raise QiskitError('Invalid QuantumChannel {}'.format(rep))
def _kraus_to_operator(data):
"""Transform Kraus representation to Operator representation."""
if data[1] is not None or len(data[0]) > 1:
raise QiskitError(
'Channel cannot be converted to Operator representation')
return data[0][0]
def _stinespring_to_operator(data, output_dim):
"""Transform Stinespring representation to Operator representation."""
trace_dim = data[0].shape[0] // output_dim
if data[1] is not None or trace_dim != 1:
raise QiskitError(
'Channel cannot be converted to Operator representation')
return data[0]
def _superop_to_choi(data, input_dim, output_dim):
    """Convert a SuperOp matrix to a Choi matrix via index reshuffling."""
    return _reshuffle(data, (output_dim, output_dim, input_dim, input_dim))
def _choi_to_superop(data, input_dim, output_dim):
    """Convert a Choi matrix to a SuperOp matrix via index reshuffling."""
    return _reshuffle(data, (input_dim, output_dim, input_dim, output_dim))
def _kraus_to_choi(data):
"""Transform Kraus representation to Choi representation."""
choi = 0
kraus_l, kraus_r = data
if kraus_r is None:
for i in kraus_l:
vec = i.ravel(order='F')
choi += np.outer(vec, vec.conj())
else:
for i, j in zip(kraus_l, kraus_r):
choi += np.outer(i.ravel(order='F'), j.ravel(order='F').conj())
return choi
def _choi_to_kraus(data, input_dim, output_dim, atol=ATOL_DEFAULT):
    """Transform Choi representation to Kraus representation.

    Args:
        data (np.array): the Choi matrix of the channel.
        input_dim (int): input dimension of the channel.
        output_dim (int): output dimension of the channel.
        atol (float): absolute tolerance used for the Hermiticity and
            eigenvalue-positivity checks.

    Returns:
        tuple: ``(kraus, None)`` for a CP map, or a generalized
        ``(kraus_l, kraus_r)`` pair obtained from an SVD when the Choi
        matrix is not a valid CP map.
    """
    # Check if hermitian matrix
    if is_hermitian_matrix(data, atol=atol):
        # Get eigen-decomposition of Choi-matrix
        # This should be a call to la.eigh, but there is an OpenBlas
        # threading issue that is causing segfaults.
        # Need schur here since la.eig does not
        # guarantee orthogonality in degenerate subspaces
        w, v = la.schur(data, output='complex')
        w = w.diagonal().real
        # Check eigenvalues are non-negative
        if len(w[w < -atol]) == 0:
            # CP-map Kraus representation
            kraus = []
            for val, vec in zip(w, v.T):
                # Each eigenpair with non-negligible eigenvalue contributes
                # one Kraus operator sqrt(val) * unvec(vec).
                if abs(val) > atol:
                    k = np.sqrt(val) * vec.reshape(
                        (output_dim, input_dim), order='F')
                    kraus.append(k)
            # If we are converting a zero matrix, we need to return a Kraus set
            # with a single zero-element Kraus matrix
            if not kraus:
                kraus.append(np.zeros((output_dim, input_dim), dtype=complex))
            return kraus, None
    # Non-CP-map generalized Kraus representation
    mat_u, svals, mat_vh = la.svd(data)
    kraus_l = []
    kraus_r = []
    for val, vec_l, vec_r in zip(svals, mat_u.T, mat_vh.conj()):
        kraus_l.append(
            np.sqrt(val) * vec_l.reshape((output_dim, input_dim), order='F'))
        kraus_r.append(
            np.sqrt(val) * vec_r.reshape((output_dim, input_dim), order='F'))
    return kraus_l, kraus_r
def _stinespring_to_kraus(data, output_dim):
"""Transform Stinespring representation to Kraus representation."""
kraus_pair = []
for stine in data:
if stine is None:
kraus_pair.append(None)
else:
trace_dim = stine.shape[0] // output_dim
iden = np.eye(output_dim)
kraus = []
for j in range(trace_dim):
vec = np.zeros(trace_dim)
vec[j] = 1
kraus.append(np.kron(iden, vec[None, :]).dot(stine))
kraus_pair.append(kraus)
return tuple(kraus_pair)
def _stinespring_to_choi(data, input_dim, output_dim):
"""Transform Stinespring representation to Choi representation."""
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->jilk', stine_l, stine_r.conj()),
2 * [input_dim * output_dim])
def _stinespring_to_superop(data, input_dim, output_dim):
"""Transform Stinespring representation to SuperOp representation."""
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->ikjl', stine_r.conj(), stine_l),
(output_dim * output_dim, input_dim * input_dim))
def _kraus_to_stinespring(data, input_dim, output_dim):
"""Transform Kraus representation to Stinespring representation."""
stine_pair = [None, None]
for i, kraus in enumerate(data):
if kraus is not None:
num_kraus = len(kraus)
stine = np.zeros((output_dim * num_kraus, input_dim),
dtype=complex)
for j, mat in enumerate(kraus):
vec = np.zeros(num_kraus)
vec[j] = 1
stine += np.kron(mat, vec[:, None])
stine_pair[i] = stine
return tuple(stine_pair)
def _kraus_to_superop(data):
"""Transform Kraus representation to SuperOp representation."""
kraus_l, kraus_r = data
superop = 0
if kraus_r is None:
for i in kraus_l:
superop += np.kron(np.conj(i), i)
else:
for i, j in zip(kraus_l, kraus_r):
superop += np.kron(np.conj(j), i)
return superop
def _chi_to_choi(data, input_dim):
    """Convert a Chi matrix to a Choi matrix (Pauli basis -> column basis)."""
    return _transform_from_pauli(data, int(np.log2(input_dim)))
def _choi_to_chi(data, input_dim):
    """Convert a Choi matrix to a Chi matrix (column basis -> Pauli basis)."""
    return _transform_to_pauli(data, int(np.log2(input_dim)))
def _ptm_to_superop(data, input_dim):
    """Convert a PTM matrix to a SuperOp matrix (Pauli basis -> column basis)."""
    return _transform_from_pauli(data, int(np.log2(input_dim)))
def _superop_to_ptm(data, input_dim):
    """Convert a SuperOp matrix to a PTM matrix (column basis -> Pauli basis)."""
    return _transform_to_pauli(data, int(np.log2(input_dim)))
def _bipartite_tensor(mat1, mat2, shape1=None, shape2=None):
    """Tensor product (A (x) B) of bipartite matrices with reraveled indices.

    This is used for tensor products of superoperators and Choi matrices.

    Args:
        mat1 (matrix_like): a bipartite matrix A
        mat2 (matrix_like): a bipartite matrix B
        shape1 (tuple): bipartite-shape for matrix A (a0, a1, a2, a3)
        shape2 (tuple): bipartite-shape for matrix B (b0, b1, b2, b3)

    Returns:
        np.array: a bipartite matrix for reravel(A (x) B).

    Raises:
        QiskitError: if input matrices are wrong shape.
    """
    mat1 = np.array(mat1)
    mat2 = np.array(mat2)
    rows1, cols1 = mat1.shape
    rows2, cols2 = mat2.shape
    # When no bipartite shape is given, assume an even split of each index.
    if shape1 is None:
        half_r = int(np.sqrt(rows1))
        half_c = int(np.sqrt(cols1))
        shape1 = (half_r, half_r, half_c, half_c)
    if shape2 is None:
        half_r = int(np.sqrt(rows2))
        half_c = int(np.sqrt(cols2))
        shape2 = (half_r, half_r, half_c, half_c)
    # Validate that each bipartite shape factors its matrix dimensions.
    for label, shape, nrows, ncols in (("shape_a", shape1, rows1, cols1),
                                       ("shape_b", shape2, rows2, cols2)):
        if len(shape) != 4 or shape[0] * shape[1] != nrows \
                or shape[2] * shape[3] != ncols:
            raise QiskitError("Invalid {}".format(label))
    return _reravel(mat1, mat2, shape1, shape2)
def _reravel(mat1, mat2, shape1, shape2):
"""Reravel two bipartite matrices."""
# Reshuffle indices
left_dims = shape1[:2] + shape2[:2]
right_dims = shape1[2:] + shape2[2:]
tensor_shape = left_dims + right_dims
final_shape = (np.product(left_dims), np.product(right_dims))
# Tensor product matrices
data = np.kron(mat1, mat2)
data = np.reshape(
np.transpose(np.reshape(data, tensor_shape), (0, 2, 1, 3, 4, 6, 5, 7)),
final_shape)
return data
def _transform_to_pauli(data, num_qubits):
    """Change of basis of bipartite matrix representation.

    Maps a matrix from the column-vectorization (computational) basis into the
    Pauli basis, e.g. Choi -> Chi or SuperOp -> PTM.

    Args:
        data (np.array): the input matrix (dimension 4**num_qubits).
        num_qubits (int): number of qubits.

    Returns:
        np.array: the matrix expressed in the Pauli basis.
    """
    # Change basis: sum_{i=0}^3 |i>><<sigma_i| (single-qubit building block)
    basis_mat = np.array(
        [[1, 0, 0, 1], [0, 1, 1, 0], [0, -1j, 1j, 0], [1, 0j, 0, -1]],
        dtype=complex)
    # Note that we manually renormalized after change of basis
    # to avoid rounding errors from square-roots of 2.
    cob = basis_mat
    for _ in range(num_qubits - 1):
        dim = int(np.sqrt(len(cob)))
        # Tensor in one more qubit and reshuffle the subsystem ordering so
        # the row/column index grouping stays consistent.
        cob = np.reshape(
            np.transpose(
                np.reshape(
                    np.kron(basis_mat, cob), (4, dim * dim, 2, 2, dim, dim)),
                (0, 1, 2, 4, 3, 5)), (4 * dim * dim, 4 * dim * dim))
    # The 1 / 2**num_qubits factor restores normalization (see note above).
    return np.dot(np.dot(cob, data), cob.conj().T) / 2**num_qubits
def _transform_from_pauli(data, num_qubits):
    """Change of basis of bipartite matrix representation.

    Maps a matrix from the Pauli basis back into the column-vectorization
    (computational) basis, e.g. Chi -> Choi or PTM -> SuperOp.

    Args:
        data (np.array): the input matrix (dimension 4**num_qubits).
        num_qubits (int): number of qubits.

    Returns:
        np.array: the matrix expressed in the column-vectorization basis.
    """
    # Change basis: sum_{i=0}^3 |sigma_i>><i| (single-qubit building block)
    basis_mat = np.array(
        [[1, 0, 0, 1], [0, 1, 1j, 0], [0, 1, -1j, 0], [1, 0j, 0, -1]],
        dtype=complex)
    # Note that we manually renormalized after change of basis
    # to avoid rounding errors from square-roots of 2.
    cob = basis_mat
    for _ in range(num_qubits - 1):
        dim = int(np.sqrt(len(cob)))
        # Tensor in one more qubit and reshuffle the subsystem ordering so
        # the row/column index grouping stays consistent.
        cob = np.reshape(
            np.transpose(
                np.reshape(
                    np.kron(basis_mat, cob), (2, 2, dim, dim, 4, dim * dim)),
                (0, 2, 1, 3, 4, 5)), (4 * dim * dim, 4 * dim * dim))
    # The 1 / 2**num_qubits factor restores normalization (see note above).
    return np.dot(np.dot(cob, data), cob.conj().T) / 2**num_qubits
def _reshuffle(mat, shape):
"""Reshuffle the indices of a bipartite matrix A[ij,kl] -> A[lj,ki]."""
return np.reshape(
np.transpose(np.reshape(mat, shape), (3, 1, 2, 0)),
(shape[3] * shape[1], shape[0] * shape[2]))
def _check_nqubit_dim(input_dim, output_dim):
"""Return true if dims correspond to an n-qubit channel."""
if input_dim != output_dim:
raise QiskitError(
'Not an n-qubit channel: input_dim' +
' ({}) != output_dim ({})'.format(input_dim, output_dim))
num_qubits = int(np.log2(input_dim))
if 2**num_qubits != input_dim:
raise QiskitError('Not an n-qubit channel: input_dim != 2 ** n')
|
gadial/qiskit-terra | qiskit/algorithms/phase_estimators/hamiltonian_phase_estimation_result.py | <reponame>gadial/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Result from running HamiltonianPhaseEstimation"""
from typing import Dict, Union, cast
from qiskit.algorithms.algorithm_result import AlgorithmResult
from .phase_estimation_result import PhaseEstimationResult
from .phase_estimation_scale import PhaseEstimationScale
class HamiltonianPhaseEstimationResult(AlgorithmResult):
    """Store and manipulate results from running `HamiltonianPhaseEstimation`.

    The API of this class is nearly the same as `PhaseEstimatorResult`, differing only in
    the presence of an additional keyword argument in the methods. If `scaled`
    is `False`, then the phases are not translated and scaled to recover the
    eigenvalues of the Hamiltonian. Instead `phi` in :math:`[0, 1)` is returned,
    as is the case when the unitary is not derived from a Hamiltonian.

    This class is meant to be instantiated via `HamiltonianPhaseEstimation.estimate`.
    """

    def __init__(self,
                 phase_estimation_result: PhaseEstimationResult,
                 phase_estimation_scale: PhaseEstimationScale,
                 id_coefficient: float,
                 ) -> None:
        """
        Args:
            phase_estimation_result: The result object returned by PhaseEstimation.estimate.
            phase_estimation_scale: Object used to scale phases to obtain eigenvalues.
            id_coefficient: The coefficient of the identity term in the Hamiltonian.
                            Eigenvalues are computed without this term so that the
                            coefficient must be added to give correct eigenvalues.
                            This is done automatically when retrieving eigenvalues.
        """
        super().__init__()
        # Scale/shift object mapping phases in [0, 1) to eigenvalues.
        self._phase_estimation_scale = phase_estimation_scale
        # Identity-term coefficient, re-added when reporting eigenvalues.
        self._id_coefficient = id_coefficient
        # Raw result of the underlying (Hamiltonian-agnostic) phase estimation.
        self._phase_estimation_result = phase_estimation_result

    # pylint: disable=arguments-differ
    def filter_phases(self, cutoff: float = 0.0, scaled: bool = True,
                      as_float: bool = True) -> Dict[Union[str, float], float]:
        """Filter phases as does `PhaseEstimatorResult.filter_phases`, with
        the addition that `phi` is shifted and translated to return eigenvalues
        of the Hamiltonian.

        Args:
            cutoff: Minimum weight of number of counts required to keep a bit string.
                The default value is `0.0`.
            scaled: If False, return `phi` in :math:`[0, 1)` rather than the eigenvalues of
                the Hamiltonian.
            as_float: If `True`, returned keys are floats in :math:`[0.0, 1.0)`. If `False`
                returned keys are bit strings.

        Raises:
            ValueError: if `as_float` is `False` and `scaled` is `True`.

        Returns:
            A dict of filtered phases.
        """
        # Bit-string keys cannot be scaled to eigenvalues, so this
        # combination of flags is invalid.
        if scaled and not as_float:
            raise ValueError('`as_float` must be `True` if `scaled` is `True`.')

        phases = self._phase_estimation_result.filter_phases(cutoff, as_float=as_float)
        if scaled:
            return cast(Dict, self._phase_estimation_scale.scale_phases(phases,
                                                                        self._id_coefficient))
        else:
            return cast(Dict, phases)

    @property
    def most_likely_phase(self) -> float:
        """The most likely phase of the unitary corresponding to the Hamiltonian.

        Returns:
            The most likely phase.
        """
        return self._phase_estimation_result.most_likely_phase

    @property
    def most_likely_eigenvalue(self) -> float:
        """The most likely eigenvalue of the Hamiltonian.

        This method calls `most_likely_phase` and scales the result to
        obtain an eigenvalue.

        Returns:
            The most likely eigenvalue of the Hamiltonian.
        """
        phase = self._phase_estimation_result.most_likely_phase
        return self._phase_estimation_scale.scale_phase(phase, self._id_coefficient)
|
gadial/qiskit-terra | qiskit/algorithms/phase_estimators/phase_estimator.py | <filename>qiskit/algorithms/phase_estimators/phase_estimator.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Phase Estimator interface."""
from typing import Optional
from abc import ABC, abstractmethod, abstractproperty
from qiskit.circuit import QuantumCircuit
from qiskit.algorithms.algorithm_result import AlgorithmResult
class PhaseEstimator(ABC):
    """The Phase Estimator interface.

    Algorithms that can compute a phase for a unitary operator and
    initial state may implement this interface to allow different
    algorithms to be used interchangeably.
    """

    @abstractmethod
    def estimate(self,
                 unitary: Optional[QuantumCircuit] = None,
                 state_preparation: Optional[QuantumCircuit] = None,
                 pe_circuit: Optional[QuantumCircuit] = None,
                 num_unitary_qubits: Optional[int] = None) -> 'PhaseEstimatorResult':
        """Estimate the phase.

        Implementations define which combinations of the optional arguments
        (e.g. ``unitary``/``state_preparation`` versus a pre-built
        ``pe_circuit``) are accepted.
        """
        raise NotImplementedError
class PhaseEstimatorResult(AlgorithmResult):
    """Phase Estimator Result."""

    # ``abc.abstractproperty`` is deprecated since Python 3.3; stacking
    # ``@property`` over ``@abstractmethod`` is the supported equivalent
    # and behaves identically for subclasses and `ABCMeta` enforcement.
    @property
    @abstractmethod
    def most_likely_phase(self) -> float:
        r"""Return the estimated phase as a number in :math:`[0.0, 1.0)`.

        1.0 corresponds to a phase of :math:`2\pi`. It is assumed that the input vector is an
        eigenvector of the unitary so that the peak of the probability density occurs at the bit
        string that most closely approximates the true phase.
        """
        raise NotImplementedError
|
gadial/qiskit-terra | qiskit/algorithms/optimizers/spsa.py | <gh_stars>1-10
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Simultaneous Perturbation Stochastic Approximation (SPSA) optimizer."""
from typing import Iterator, Optional, Union, Callable, Tuple, Dict
import logging
import warnings
from time import time
from collections import deque
import numpy as np
from qiskit.utils import algorithm_globals
from .optimizer import Optimizer, OptimizerSupportLevel
# parameters, loss, stepsize, number of function evaluations, accepted
CALLBACK = Callable[[np.ndarray, float, float, int, bool], None]
logger = logging.getLogger(__name__)
class SPSA(Optimizer):
    """Simultaneous Perturbation Stochastic Approximation (SPSA) optimizer.

    SPSA [1] is an algorithmic method for optimizing systems with multiple unknown parameters.
    As an optimization method, it is appropriately suited to large-scale population models,
    adaptive modeling, and simulation optimization.

    .. seealso::

        Many examples are presented at the `SPSA Web site <http://www.jhuapl.edu/SPSA>`__.

    SPSA is a descent method capable of finding global minima,
    sharing this property with other methods as simulated annealing.
    Its main feature is the gradient approximation, which requires only two
    measurements of the objective function, regardless of the dimension of the optimization
    problem.

    .. note::

        SPSA can be used in the presence of noise, and it is therefore indicated in situations
        involving measurement uncertainty on a quantum computation when finding a minimum.
        If you are executing a variational algorithm using a Quantum ASseMbly Language (QASM)
        simulator or a real device, SPSA would be the most recommended choice among the optimizers
        provided here.

    The optimization process can include a calibration phase if neither the ``learning_rate`` nor
    ``perturbation`` is provided, which requires additional functional evaluations.
    (Note that either both or none must be set.) For further details on the automatic calibration,
    please refer to the supplementary information section IV. of [2].

    References:

        [1]: J. C. Spall (1998). An Overview of the Simultaneous Perturbation Method for Efficient
        Optimization, Johns Hopkins APL Technical Digest, 19(4), 482-492.
        `Online. <https://www.jhuapl.edu/SPSA/PDF-SPSA/Spall_An_Overview.PDF>`_

        [2]: A. Kandala et al. (2017). Hardware-efficient Variational Quantum Eigensolver for
        Small Molecules and Quantum Magnets. Nature 549, pages 242-246 (2017).
        `arXiv:1704.05018v2 <https://arxiv.org/pdf/1704.05018v2.pdf#section*.11>`_
    """

    def __init__(self,
                 maxiter: int = 100,
                 blocking: bool = False,
                 allowed_increase: Optional[float] = None,
                 trust_region: bool = False,
                 learning_rate: Optional[Union[float, Callable[[], Iterator]]] = None,
                 perturbation: Optional[Union[float, Callable[[], Iterator]]] = None,
                 last_avg: int = 1,
                 resamplings: Union[int, Dict[int, int]] = 1,
                 perturbation_dims: Optional[int] = None,
                 callback: Optional[CALLBACK] = None,
                 ) -> None:
        r"""
        Args:
            maxiter: The maximum number of iterations.
            blocking: If True, only accepts updates that improve the loss (minus some allowed
                increase, see next argument).
            allowed_increase: If blocking is True, this sets by how much the loss can increase
                and still be accepted. If None, calibrated automatically to be twice the
                standard deviation of the loss function.
            trust_region: If True, restricts norm of the random direction to be :math:`\leq 1`.
            learning_rate: A generator yielding learning rates for the parameter updates,
                :math:`a_k`. If set, also ``perturbation`` must be provided.
            perturbation: A generator yielding the perturbation magnitudes :math:`c_k`. If set,
                also ``learning_rate`` must be provided.
            last_avg: Return the average of the ``last_avg`` parameters instead of just the
                last parameter values.
            resamplings: The number of times the gradient is sampled using a random direction to
                construct a gradient estimate. Per default the gradient is estimated using only
                one random direction. If an integer, all iterations use the same number of
                resamplings. If a dictionary, this is interpreted as
                ``{iteration: number of resamplings per iteration}``.
            perturbation_dims: The number of perturbed dimensions. Per default, all dimensions
                are perturbed, but a smaller, fixed number can be perturbed. If set, the perturbed
                dimensions are chosen uniformly at random.
            callback: A callback function passed information in each iteration step. The
                information is, in this order: the number of function evaluations, the
                parameters, the function value, the size of the update step, whether the
                step was accepted.
        """
        super().__init__()

        # a float learning rate/perturbation is wrapped into a constant generator factory
        if isinstance(learning_rate, float):
            self.learning_rate = lambda: constant(learning_rate)
        else:
            self.learning_rate = learning_rate

        if isinstance(perturbation, float):
            self.perturbation = lambda: constant(perturbation)
        else:
            self.perturbation = perturbation

        self.maxiter = maxiter
        self.blocking = blocking
        self.allowed_increase = allowed_increase
        self.trust_region = trust_region
        self.callback = callback
        self.last_avg = last_avg
        self.resamplings = resamplings
        self.perturbation_dims = perturbation_dims

        # runtime arguments
        self._nfev = None

    @staticmethod
    def calibrate(loss: Callable[[np.ndarray], float],
                  initial_point: np.ndarray,
                  c: float = 0.2,
                  stability_constant: float = 0,
                  target_magnitude: Optional[float] = None,  # 2 pi / 10
                  alpha: float = 0.602,
                  gamma: float = 0.101,
                  modelspace: bool = False) -> Tuple[Iterator[float], Iterator[float]]:
        r"""Calibrate SPSA parameters with a powerseries as learning rate and perturbation coeffs.

        The powerseries are:

        .. math::

            a_k = \frac{a}{(A + k + 1)^\alpha}, c_k = \frac{c}{(k + 1)^\gamma}

        Args:
            loss: The loss function.
            initial_point: The initial guess of the iteration.
            c: The initial perturbation magnitude.
            stability_constant: The value of `A`.
            target_magnitude: The target magnitude for the first update step, defaults to
                :math:`2\pi / 10`.
            alpha: The exponent of the learning rate powerseries.
            gamma: The exponent of the perturbation powerseries.
            modelspace: Whether the target magnitude is the difference of parameter values
                or function values (= model space).

        Returns:
            tuple(generator, generator): A tuple of powerseries generators, the first one for the
            learning rate and the second one for the perturbation.
        """
        if target_magnitude is None:
            target_magnitude = 2 * np.pi / 10

        dim = len(initial_point)

        # compute the average magnitude of the first step
        steps = 25
        avg_magnitudes = 0
        for _ in range(steps):
            # compute the random direction
            pert = bernoulli_perturbation(dim)
            delta = loss(initial_point + c * pert) - loss(initial_point - c * pert)
            avg_magnitudes += np.abs(delta / (2 * c))
        avg_magnitudes /= steps

        if modelspace:
            a = target_magnitude / (avg_magnitudes ** 2)
        else:
            a = target_magnitude / avg_magnitudes

        # compute the rescaling factor for correct first learning rate
        if a < 1e-10:
            warnings.warn(f'Calibration failed, using {target_magnitude} for `a`')
            a = target_magnitude

        # set up the powerseries
        def learning_rate():
            return powerseries(a, alpha, stability_constant)

        def perturbation():
            return powerseries(c, gamma)

        return learning_rate, perturbation

    @staticmethod
    def estimate_stddev(loss: Callable[[np.ndarray], float],
                        initial_point: np.ndarray,
                        avg: int = 25) -> float:
        """Estimate the standard deviation of the loss function."""
        losses = [loss(initial_point) for _ in range(avg)]
        return np.std(losses)

    def _point_sample(self, loss, x, eps, delta):
        """A single sample of the gradient at position ``x`` in direction ``delta``."""
        if self._max_evals_grouped > 1:
            # evaluate both shifted points in a single batched call
            plus, minus = loss(np.concatenate((x + eps * delta, x - eps * delta)))
        else:
            plus, minus = loss(x + eps * delta), loss(x - eps * delta)

        # central finite difference along the random direction
        gradient_sample = (plus - minus) / (2 * eps) * delta
        self._nfev += 2
        return gradient_sample

    def _point_estimate(self, loss, x, eps, deltas):
        """The gradient estimate at point ``x``, averaged over all directions ``deltas``."""
        # number of samples
        resamplings = len(deltas)

        # set up variables to store averages
        gradient_estimate = np.zeros(x.size)

        # iterate over the directions
        for delta in deltas:
            gradient_sample = self._point_sample(loss, x, eps, delta)
            gradient_estimate += gradient_sample

        return gradient_estimate / resamplings

    def _compute_update(self, loss, x, k, eps):
        """Estimate the gradient at ``x`` in iteration ``k`` with perturbation size ``eps``."""
        # determine the number of gradient resamplings for this iteration
        if isinstance(self.resamplings, dict):
            avg = self.resamplings.get(k, 1)
        else:
            avg = self.resamplings

        # draw all random directions, then average the gradient samples over them
        deltas = [bernoulli_perturbation(x.size, self.perturbation_dims) for _ in range(avg)]
        gradient = self._point_estimate(loss, x, eps, deltas)
        return gradient

    def _minimize(self, loss, initial_point):
        """Run the SPSA iteration and return ``(parameters, final loss, nfev)``."""
        # ensure learning rate and perturbation are correctly set: either none or both
        # this happens only here because for the calibration the loss function is required
        if self.learning_rate is None and self.perturbation is None:
            get_learning_rate, get_perturbation = self.calibrate(loss, initial_point)
            # get iterator
            eta = get_learning_rate()
            eps = get_perturbation()
        elif self.learning_rate is None or self.perturbation is None:
            raise ValueError('If one of learning rate or perturbation is set, both must be set.')
        else:
            # get iterator
            eta = self.learning_rate()
            eps = self.perturbation()

        # prepare some initials
        x = np.asarray(initial_point)
        self._nfev = 0

        # if blocking is enabled we need to keep track of the function values
        if self.blocking:
            fx = loss(x)
            self._nfev += 1
            if self.allowed_increase is None:
                self.allowed_increase = 2 * self.estimate_stddev(loss, x)

        logger.info('=' * 30)
        logger.info('Starting SPSA optimization')
        start = time()

        # keep track of the last few steps to return their average
        last_steps = deque([x])

        for k in range(1, self.maxiter + 1):
            iteration_start = time()
            # compute update
            update = self._compute_update(loss, x, k, next(eps))

            # trust region
            if self.trust_region:
                norm = np.linalg.norm(update)
                if norm > 1:  # only rescale when the step would exceed unit length
                    update = update / norm

            # compute next parameter value
            update = update * next(eta)
            x_next = x - update

            # blocking
            if self.blocking:
                fx_next = loss(x_next)
                self._nfev += 1
                if fx + self.allowed_increase <= fx_next:  # reject if loss increased too much
                    if self.callback is not None:
                        self.callback(self._nfev,  # number of function evals
                                      x_next,  # next parameters
                                      fx_next,  # loss at next parameters
                                      np.linalg.norm(update),  # size of the update step
                                      False)  # not accepted

                    logger.info('Iteration %s/%s rejected in %s.',
                                k, self.maxiter, time() - iteration_start)
                    continue
                fx = fx_next

            logger.info('Iteration %s/%s done in %s.',
                        k, self.maxiter, time() - iteration_start)

            if self.callback is not None:
                # without blocking the loss at x_next has not been evaluated yet
                if not self.blocking:
                    fx_next = loss(x_next)
                    self._nfev += 1

                self.callback(self._nfev,  # number of function evals
                              x_next,  # next parameters
                              fx_next,  # loss at next parameters
                              np.linalg.norm(update),  # size of the update step
                              True)  # accepted

            # update parameters
            x = x_next

            # update the list of the last ``last_avg`` parameters
            if self.last_avg > 1:
                last_steps.append(x_next)
                if len(last_steps) > self.last_avg:
                    last_steps.popleft()

        logger.info('SPSA finished in %s', time() - start)
        logger.info('=' * 30)

        if self.last_avg > 1:
            x = np.mean(last_steps, axis=0)

        return x, loss(x), self._nfev

    def get_support_level(self):
        """Get the support level dictionary."""
        return {
            'gradient': OptimizerSupportLevel.ignored,
            'bounds': OptimizerSupportLevel.ignored,
            'initial_point': OptimizerSupportLevel.required
        }

    def optimize(self, num_vars, objective_function, gradient_function=None,
                 variable_bounds=None, initial_point=None):
        """Run the optimization. Gradient and bounds are ignored (see support levels)."""
        return self._minimize(objective_function, initial_point)
def bernoulli_perturbation(dim, perturbation_dims=None):
    """Sample a random Bernoulli (+/-1) perturbation direction.

    If ``perturbation_dims`` is given, only that many randomly chosen
    coordinates are perturbed and the remaining entries are zero.
    """
    if perturbation_dims is None:
        # perturb every coordinate: each entry is +1 or -1 with equal probability
        return 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=dim)

    # draw the +/-1 values first, then the coordinates they are placed at
    # (same RNG call order as a full-dimension draw followed by a choice)
    values = 1 - 2 * algorithm_globals.random.binomial(1, 0.5, size=perturbation_dims)
    chosen = algorithm_globals.random.choice(list(range(dim)), size=perturbation_dims,
                                             replace=False)
    direction = np.zeros(dim)
    direction[chosen] = values
    return direction
def powerseries(eta=0.01, power=2, offset=0):
    """Yield the decreasing power-law series ``eta / (k + offset) ** power`` for k = 1, 2, ..."""
    k = 0
    while True:
        k += 1
        yield eta / ((k + offset) ** power)
def constant(eta=0.01):
    """Yield the constant value ``eta`` indefinitely."""
    value = eta
    while True:
        yield value
|
gadial/qiskit-terra | qiskit/providers/basicaer/basicaertools.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Contains functions used by the basic aer simulators.
"""
from string import ascii_uppercase, ascii_lowercase
from typing import List, Optional
import numpy as np
import qiskit.circuit.library.standard_gates as gates
from qiskit.exceptions import QiskitError
# Names of the single-qubit gates accepted by this simulator backend.
# NOTE(review): ``single_gate_matrix`` below additionally handles 'id', which
# is not listed here -- confirm whether that omission is intentional.
SINGLE_QUBIT_GATES = ('U', 'u1', 'u2', 'u3', 'rz', 'sx', 'x')
def single_gate_matrix(gate: str, params: Optional[List[float]] = None):
    """Get the matrix for a single qubit.

    Args:
        gate: the single qubit gate name
        params: the operation parameters op['params']
    Returns:
        array: A numpy array representing the matrix
    Raises:
        QiskitError: If a gate outside the supported set is passed in for the
            ``Gate`` argument.
    """
    if params is None:
        params = []

    # dispatch table replaces the long if/elif chain; same gate set as before
    gate_classes = {
        'U': gates.UGate,
        'u3': gates.U3Gate,
        'u2': gates.U2Gate,
        'u1': gates.U1Gate,
        'rz': gates.RZGate,
        'id': gates.IGate,
        'sx': gates.SXGate,
        'x': gates.XGate,
    }

    gate_cls = gate_classes.get(gate)
    if gate_cls is None:
        raise QiskitError('Gate is not a valid basis gate for this simulator: %s' % gate)

    return gate_cls(*params).to_matrix()
# Pre-built CX matrix; CXGate takes no parameters so this is a constant.
# NOTE(review): ``cx_gate_matrix`` below constructs its own array instead of
# returning this cache -- confirm which callers rely on this module constant.
_CX_MATRIX = gates.CXGate().to_matrix()
def cx_gate_matrix():
    """Get the matrix for a controlled-NOT gate."""
    # start from the 4x4 identity and swap rows 1 and 3, which yields the
    # same permutation matrix as the previous hard-coded literal
    mat = np.eye(4, dtype=complex)
    mat[[1, 3]] = mat[[3, 1]]
    return mat
def einsum_matmul_index(gate_indices, number_of_qubits):
    """Return the index string for Numpy.einsum matrix-matrix multiplication.

    The returned indices are to perform a matrix multiplication A.B where
    the matrix A is an M-qubit matrix, matrix B is an N-qubit matrix, and
    M <= N, and identity matrices are implied on the subsystems where A has no
    support on B.

    Args:
        gate_indices (list[int]): the indices of the right matrix subsystems
                                  to contract with the left matrix.
        number_of_qubits (int): the total number of qubits for the right matrix.

    Returns:
        str: An indices string for the Numpy.einsum function.
    """
    mat_l, mat_r, tens_lin, tens_lout = _einsum_matmul_index_helper(gate_indices,
                                                                    number_of_qubits)

    # the uppercase (right-side) labels are shared by the input and output tensor
    tens_r = ascii_uppercase[:number_of_qubits]

    # assemble the einsum specification in one pass instead of chained .format calls
    return f"{mat_l}{mat_r}, {tens_lin}{tens_r}->{tens_lout}{tens_r}"
def einsum_vecmul_index(gate_indices, number_of_qubits):
    """Return the index string for Numpy.einsum matrix-vector multiplication.

    The returned indices are to perform a matrix multiplication A.v where
    the matrix A is an M-qubit matrix, vector v is an N-qubit vector, and
    M <= N, and identity matrices are implied on the subsystems where A has no
    support on v.

    Args:
        gate_indices (list[int]): the indices of the right matrix subsystems
                                  to contract with the left matrix.
        number_of_qubits (int): the total number of qubits for the right matrix.

    Returns:
        str: An indices string for the Numpy.einsum function.
    """
    mat_l, mat_r, tens_lin, tens_lout = _einsum_matmul_index_helper(gate_indices,
                                                                    number_of_qubits)

    # assemble the einsum specification in one pass instead of chained .format calls
    return f"{mat_l}{mat_r}, {tens_lin}->{tens_lout}"
def _einsum_matmul_index_helper(gate_indices, number_of_qubits):
"""Return the index string for Numpy.einsum matrix multiplication.
The returned indices are to perform a matrix multiplication A.v where
the matrix A is an M-qubit matrix, matrix v is an N-qubit vector, and
M <= N, and identity matrices are implied on the subsystems where A has no
support on v.
Args:
gate_indices (list[int]): the indices of the right matrix subsystems
to contract with the left matrix.
number_of_qubits (int): the total number of qubits for the right matrix.
Returns:
tuple: (mat_left, mat_right, tens_in, tens_out) of index strings for
that may be combined into a Numpy.einsum function string.
Raises:
QiskitError: if the total number of qubits plus the number of
contracted indices is greater than 26.
"""
# Since we use ASCII alphabet for einsum index labels we are limited
# to 26 total free left (lowercase) and 26 right (uppercase) indexes.
# The rank of the contracted tensor reduces this as we need to use that
# many characters for the contracted indices
if len(gate_indices) + number_of_qubits > 26:
raise QiskitError("Total number of free indexes limited to 26")
# Indices for N-qubit input tensor
tens_in = ascii_lowercase[:number_of_qubits]
# Indices for the N-qubit output tensor
tens_out = list(tens_in)
# Left and right indices for the M-qubit multiplying tensor
mat_left = ""
mat_right = ""
# Update left indices for mat and output
for pos, idx in enumerate(reversed(gate_indices)):
mat_left += ascii_lowercase[-1 - pos]
mat_right += tens_in[-1 - idx]
tens_out[-1 - idx] = ascii_lowercase[-1 - pos]
tens_out = "".join(tens_out)
# Combine indices into matrix multiplication string format
# for numpy.einsum function
return mat_left, mat_right, tens_in, tens_out
|
gadial/qiskit-terra | qiskit/circuit/library/arithmetic/polynomial_pauli_rotations.py | <reponame>gadial/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-member
"""Polynomially controlled Pauli-rotations."""
import warnings
from typing import List, Optional, Dict, Sequence
from itertools import product
from qiskit.circuit import QuantumRegister
from qiskit.circuit.exceptions import CircuitError
from .functional_pauli_rotations import FunctionalPauliRotations
def _binomial_coefficients(n):
""""Return a dictionary of binomial coefficients
Based-on/forked from sympy's binomial_coefficients() function [#]
.. [#] https://github.com/sympy/sympy/blob/sympy-1.5.1/sympy/ntheory/multinomial.py
"""
data = {(0, n): 1, (n, 0): 1}
temp = 1
for k in range(1, n // 2 + 1):
temp = (temp * (n - k + 1)) // k
data[k, n - k] = data[n - k, k] = temp
return data
def _large_coefficients_iter(m, n):
    """Return an iterator of multinomial coefficients, as ``((k1, ..., km), coeff)`` pairs.

    Used for the "large" regime ``m >= 2*n`` where iterating is cheaper than
    building the full table.

    Based-on/forked from sympy's multinomial_coefficients_iterator() function [#]

    .. [#] https://github.com/sympy/sympy/blob/sympy-1.5.1/sympy/ntheory/multinomial.py
    """
    # small cases: just replay the fully-computed table
    if m < 2*n or n == 1:
        coefficients = _multinomial_coefficients(m, n)
        for key, value in coefficients.items():
            yield(key, value)
    else:
        # the coefficient of a tuple only depends on its nonzero entries, so a
        # table for m == n suffices; re-key it by the nonzero entries only
        coefficients = _multinomial_coefficients(n, n)
        coefficients_dict = {}
        for key, value in coefficients.items():
            coefficients_dict[tuple(filter(None, key))] = value
        coefficients = coefficients_dict
        # start from the tuple (n, 0, ..., 0)
        temp = [n] + [0] * (m - 1)
        temp_a = tuple(temp)
        b = tuple(filter(None, temp_a))
        yield (temp_a, coefficients[b])
        if n:
            j = 0  # j will be the leftmost nonzero position
        else:
            j = m
        # enumerate tuples in co-lex order
        while j < m - 1:
            # compute next tuple
            temp_j = temp[j]
            if j:
                temp[j] = 0
                temp[0] = temp_j
            if temp_j > 1:
                temp[j + 1] += 1
                j = 0
            else:
                j += 1
                temp[j] += 1
            temp[0] -= 1
            temp_a = tuple(temp)
            b = tuple(filter(None, temp_a))
            yield (temp_a, coefficients[b])
def _multinomial_coefficients(m, n):
    """Return a dictionary ``{(k1, ..., km): C(n; k1, ..., km)}`` of multinomial coefficients.

    Based-on/forked from sympy's multinomial_coefficients() function [#]

    .. [#] https://github.com/sympy/sympy/blob/sympy-1.5.1/sympy/ntheory/multinomial.py
    """
    # degenerate cases: no variables at all
    if not m:
        if n:
            return {}
        return {(): 1}
    # two variables reduce to plain binomial coefficients
    if m == 2:
        return _binomial_coefficients(n)
    # many variables, few powers: delegate to the iterator-based computation
    if m >= 2*n and n > 1:
        return dict(_large_coefficients_iter(m, n))
    if n:
        j = 0
    else:
        j = m
    # start from the tuple (n, 0, ..., 0) and enumerate successors in co-lex
    # order, computing each coefficient from previously computed entries
    temp = [n] + [0] * (m - 1)
    res = {tuple(temp): 1}
    while j < m - 1:
        temp_j = temp[j]
        if j:
            temp[j] = 0
            temp[0] = temp_j
        if temp_j > 1:
            temp[j + 1] += 1
            j = 0
            start = 1
            v = 0
        else:
            j += 1
            start = j + 1
            v = res[tuple(temp)]
        temp[j] += 1
        # accumulate contributions from neighbors that differ by one unit
        for k in range(start, m):
            if temp[k]:
                temp[k] -= 1
                v += res[tuple(temp)]
                temp[k] += 1
        temp[0] -= 1
        res[tuple(temp)] = (v * temp_j) // (n - temp[0])
    return res
class PolynomialPauliRotations(FunctionalPauliRotations):
    r"""A circuit implementing polynomial Pauli rotations.

    For a polynomial :math:`p(x)`, a basis state :math:`|i\rangle` and a target qubit
    :math:`|0\rangle` this operator acts as:

    .. math::

        |i\rangle |0\rangle \mapsto \cos(p(i)) |i\rangle |0\rangle + \sin(p(i)) |i\rangle |1\rangle

    Let n be the number of qubits representing the state, d the degree of p(x) and q_i the qubits,
    where q_0 is the least significant qubit. Then for

    .. math::

        x = \sum_{i=0}^{n-1} 2^i q_i,

    we can write

    .. math::

        p(x) = \sum_{j=0}^{d} c_j x^j

    where :math:`c` are the input coefficients, ``coeffs``.
    """

    def __init__(self, num_state_qubits: Optional[int] = None,
                 coeffs: Optional[List[float]] = None,
                 basis: str = 'Y',
                 reverse: bool = False,
                 name: str = 'poly') -> None:
        """Prepare an approximation to a state with amplitudes specified by a polynomial.

        Args:
            num_state_qubits: The number of qubits representing the state.
            coeffs: The coefficients of the polynomial. ``coeffs[i]`` is the coefficient of the
                i-th power of x. Defaults to linear: [0, 1].
            basis: The type of Pauli rotation ('X', 'Y', 'Z').
            reverse: If True, apply the polynomial with the reversed list of qubits
                (i.e. q_n as q_0, q_n-1 as q_1, etc). Deprecated.
            name: The name of the circuit.
        """
        # set default internal parameters
        self._coeffs = coeffs or [0, 1]
        self._reverse = reverse
        if self._reverse is True:
            warnings.warn('The reverse flag has been deprecated. '
                          'Use circuit.reverse_bits() to reverse order of qubits.',
                          DeprecationWarning)

        # initialize super (after setting coeffs)
        super().__init__(num_state_qubits=num_state_qubits, basis=basis, name=name)

    @property
    def coeffs(self) -> List[float]:
        """The coefficients of the polynomial.

        ``coeffs[i]`` is the coefficient of the i-th power of the state register value.

        Returns:
            The coefficients of the polynomial.
        """
        return self._coeffs

    @coeffs.setter
    def coeffs(self, coeffs: List[float]) -> None:
        """Set the coefficients of the polynomial.

        Args:
            coeffs: The new coefficients; ``coeffs[i]`` is the coefficient of the i-th power of x.
        """
        # invalidate the built circuit so it is rebuilt with the new coefficients
        self._invalidate()
        self._coeffs = coeffs

    @property
    def degree(self) -> int:
        """Return the degree of the polynomial, equal to the number of coefficients minus 1.

        Returns:
            The degree of the polynomial. If the coefficients have not been set, return 0.
        """
        if self.coeffs:
            return len(self.coeffs) - 1
        return 0

    @property
    def reverse(self) -> bool:
        """Whether to apply the rotations on the reversed list of qubits.

        Returns:
            True, if the rotations are applied on the reversed list, False otherwise.
        """
        return self._reverse

    @property
    def num_ancilla_qubits(self):
        """Deprecated. Use num_ancillas instead."""
        warnings.warn('The PolynomialPauliRotations.num_ancilla_qubits property is deprecated '
                      'as of 0.16.0. It will be removed no earlier than 3 months after the release '
                      'date. You should use the num_ancillas property instead.',
                      DeprecationWarning, stacklevel=2)
        return self.num_ancillas

    def _reset_registers(self, num_state_qubits):
        # rebuild the quantum registers for the (possibly new) number of state qubits
        if num_state_qubits is not None:
            # set new register of appropriate size
            qr_state = QuantumRegister(num_state_qubits, name='state')
            qr_target = QuantumRegister(1, name='target')
            self.qregs = [qr_state, qr_target]
        else:
            self.qregs = []

    def _check_configuration(self, raise_on_failure: bool = True) -> bool:
        # validate that the circuit has been configured with enough qubits
        valid = True

        if self.num_state_qubits is None:
            valid = False
            if raise_on_failure:
                raise AttributeError('The number of qubits has not been set.')

        if self.num_qubits < self.num_state_qubits + 1:
            valid = False
            if raise_on_failure:
                raise CircuitError('Not enough qubits in the circuit, need at least '
                                   '{}.'.format(self.num_state_qubits + 1))

        return valid

    def _get_rotation_coefficients(self) -> Dict[Sequence[int], float]:
        """Compute the coefficient of each monomial.

        Returns:
            A dictionary with pairs ``{control_state: rotation angle}`` where ``control_state``
            is a tuple of ``0`` or ``1`` bits.
        """
        # determine the control states: all bitstrings with between 1 and `degree` ones
        all_combinations = list(product([0, 1], repeat=self.num_state_qubits))
        valid_combinations = []
        for combination in all_combinations:
            if 0 < sum(combination) <= self.degree:
                valid_combinations += [combination]

        rotation_coeffs = {control_state: 0 for control_state in valid_combinations}

        # compute the coefficients for the control states
        for i, coeff in enumerate(self.coeffs[1:]):
            i += 1  # since we skip the first element we need to increase i by one

            # iterate over the multinomial coefficients
            for comb, num_combs in _multinomial_coefficients(self.num_state_qubits, i).items():
                control_state = ()
                power = 1
                for j, qubit in enumerate(comb):
                    if qubit > 0:  # means we control on qubit j
                        control_state += (1,)
                        power *= 2 ** (j * qubit)
                    else:
                        control_state += (0,)

                # Add angle
                rotation_coeffs[control_state] += coeff * num_combs * power

        return rotation_coeffs

    def _build(self):
        """Construct the rotation circuit from the polynomial coefficients."""
        # do not build the circuit if _data is already populated
        if self._data is not None:
            return

        self._data = []

        # check whether the configuration is valid
        self._check_configuration()

        qr_state = self.qubits[:self.num_state_qubits]
        qr_target = self.qubits[self.num_state_qubits]

        rotation_coeffs = self._get_rotation_coefficients()

        # constant term of the polynomial: an unconditional rotation on the target
        if self.basis == 'x':
            self.rx(self.coeffs[0], qr_target)
        elif self.basis == 'y':
            self.ry(self.coeffs[0], qr_target)
        else:
            self.rz(self.coeffs[0], qr_target)

        for c in rotation_coeffs:
            # collect the control qubits corresponding to the 1-bits of the control state
            qr_control = []
            if self.reverse:
                for i, _ in enumerate(c):
                    if c[i] > 0:
                        qr_control.append(qr_state[qr_state.size - i - 1])
            else:
                for i, _ in enumerate(c):
                    if c[i] > 0:
                        qr_control.append(qr_state[i])

            # apply controlled rotations
            if len(qr_control) > 1:
                if self.basis == 'x':
                    self.mcrx(rotation_coeffs[c], qr_control, qr_target)
                elif self.basis == 'y':
                    self.mcry(rotation_coeffs[c], qr_control, qr_target)
                else:
                    self.mcrz(rotation_coeffs[c], qr_control, qr_target)
            elif len(qr_control) == 1:
                if self.basis == 'x':
                    self.crx(rotation_coeffs[c], qr_control[0], qr_target)
                elif self.basis == 'y':
                    self.cry(rotation_coeffs[c], qr_control[0], qr_target)
                else:
                    self.crz(rotation_coeffs[c], qr_control[0], qr_target)
|
gadial/qiskit-terra | qiskit/quantum_info/operators/dihedral/random.py | <reponame>gadial/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Random CNOTDihedral operator functions
"""
import numpy as np
from numpy.random import default_rng
from .dihedral import CNOTDihedral
def random_cnotdihedral(num_qubits, seed=None):
    """Return a random CNOTDihedral element.

    Args:
        num_qubits (int): the number of qubits for the CNOTDihedral object.
        seed (int or RandomState): Optional. Set a fixed seed or
            generator for RNG.

    Returns:
        CNOTDihedral: a random CNOTDihedral element.
    """
    # resolve the seed argument into a numpy Generator
    if seed is None:
        rng = np.random.default_rng()
    elif isinstance(seed, np.random.Generator):
        rng = seed
    else:
        rng = default_rng(seed)

    elem = CNOTDihedral(num_qubits=num_qubits)

    # Random phase polynomial weights
    # linear terms: one value in {0, ..., 7} per qubit
    weight_1 = rng.integers(8, size=num_qubits)
    elem.poly.weight_1 = weight_1
    # quadratic terms: even values in {0, 2, 4, 6}, one per qubit pair
    weight_2 = 2 * rng.integers(4, size=int(num_qubits * (num_qubits - 1) / 2))
    elem.poly.weight_2 = weight_2
    # cubic terms: values in {0, 4}, one per qubit triple
    weight_3 = 4 * rng.integers(2, size=int(num_qubits * (num_qubits - 1) *
                                            (num_qubits - 2) / 6))
    elem.poly.weight_3 = weight_3

    # Random affine function
    # Random invertible binary matrix: resample until the determinant is odd.
    # The float determinant is reduced mod 2; values near 0 *or* near 2 both
    # indicate an even (singular over GF(2)) determinant, hence both checks.
    det = 0
    while np.allclose(det, 0) or np.allclose(det, 2):
        linear = rng.integers(2, size=(num_qubits, num_qubits))
        det = np.linalg.det(linear) % 2
    elem.linear = linear

    # Random shift
    shift = rng.integers(2, size=num_qubits)
    elem.shift = shift

    return elem
|
gadial/qiskit-terra | qiskit/circuit/library/n_local/excitation_preserving.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The ExcitationPreserving 2-local circuit."""
from typing import Union, Optional, List, Tuple, Callable, Any
from numpy import pi
from qiskit.circuit import QuantumCircuit, Parameter
from qiskit.circuit.library.standard_gates import RZGate
from .two_local import TwoLocal
class ExcitationPreserving(TwoLocal):
r"""The heuristic excitation-preserving wave function ansatz.
The ``ExcitationPreserving`` circuit preserves the ratio of :math:`|00\rangle`,
:math:`|01\rangle + |10\rangle` and :math:`|11\rangle` states. The matrix representing
the operation is
.. math::
\newcommand{\th}{\theta/2}
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & \cos(\th) & -\sin(\th) & 0 \\
0 & \sin(\th) & \cos(\th) & 0 \\
0 & 0 & 0 & e^{-i\phi}
\end{pmatrix}
for the mode ``'fsim'`` or with :math:`e^{-i\phi} = 1` for the mode ``'iswap'``.
Note that other wave functions, such as UCC-ansatzes, are also excitation preserving.
However these can become complex quickly, while this heuristically motivated circuit follows
a simpler pattern.
This trial wave function consists of layers of :math:`Z` rotations with 2-qubit entanglements.
The entangling is creating using :math:`XX+YY` rotations and optionally a controlled-phase
gate for the mode ``'fsim'``.
See :class:`~qiskit.circuit.library.RealAmplitudes` for more detail on the possible arguments
and options such as skipping unentanglement qubits, which apply here too.
The rotations of the ExcitationPreserving ansatz can be written as
Examples:
>>> ansatz = ExcitationPreserving(3, reps=1, insert_barriers=True, entanglement='linear')
>>> print(ansatz) # show the circuit
ββββββββββββ β ββββββββββββββββββββββββββββ β ββββββββββββ
q_0: β€ RZ(ΞΈ[0]) βββββ€0 ββ€0 βββββββββββββββββββββββββββββββββ€ RZ(ΞΈ[5]) β
ββββββββββββ€ β β RXX(ΞΈ[3]) ββ RYY(ΞΈ[3]) βββββββββββββββββββββββββββββ β ββββββββββββ€
q_1: β€ RZ(ΞΈ[1]) βββββ€1 ββ€1 ββ€0 ββ€0 βββββ€ RZ(ΞΈ[6]) β
ββββββββββββ€ β βββββββββββββββββββββββββββββ RXX(ΞΈ[4]) ββ RYY(ΞΈ[4]) β β ββββββββββββ€
q_2: β€ RZ(ΞΈ[2]) βββββββββββββββββββββββββββββββββ€1 ββ€1 βββββ€ RZ(ΞΈ[7]) β
ββββββββββββ β ββββββββββββββββββββββββββββ β ββββββββββββ
>>> ansatz = ExcitationPreserving(2, reps=1)
>>> qc = QuantumCircuit(2) # create a circuit and append the RY variational form
>>> qc.cry(0.2, 0, 1) # do some previous operation
>>> qc.compose(ansatz, inplace=True) # add the swaprz
>>> qc.draw()
ββββββββββββββββββββββββββββββββββββββββββββββββββββ
q_0: ββββββ ββββββ€ RZ(ΞΈ[0]) ββ€0 ββ€0 ββ€ RZ(ΞΈ[3]) β
ββββββ΄βββββββββββββββββ€β RXX(ΞΈ[2]) ββ RYY(ΞΈ[2]) βββββββββββββ€
q_1: β€ RY(0.2) ββ€ RZ(ΞΈ[1]) ββ€1 ββ€1 ββ€ RZ(ΞΈ[4]) β
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
>>> ansatz = ExcitationPreserving(3, reps=1, mode='fsim', entanglement=[[0,2]],
... insert_barriers=True)
>>> print(ansatz)
ββββββββββββ β ββββββββββββββββββββββββββββ β ββββββββββββ
q_0: β€ RZ(ΞΈ[0]) βββββ€0 ββ€0 βββ βββββββββ€ RZ(ΞΈ[5]) β
ββββββββββββ€ β β ββ β β β ββββββββββββ€
q_1: β€ RZ(ΞΈ[1]) βββββ€ RXX(ΞΈ[3]) ββ€ RYY(ΞΈ[3]) βββΌβββββββββ€ RZ(ΞΈ[6]) β
ββββββββββββ€ β β ββ β βΞΈ[4] β ββββββββββββ€
q_2: β€ RZ(ΞΈ[2]) βββββ€1 ββ€1 βββ βββββββββ€ RZ(ΞΈ[7]) β
ββββββββββββ β ββββββββββββββββββββββββββββ β ββββββββββββ
"""
def __init__(self,
num_qubits: Optional[int] = None,
mode: str = 'iswap',
entanglement: Union[str, List[List[int]], Callable[[int], List[int]]] = 'full',
reps: int = 3,
skip_unentangled_qubits: bool = False,
skip_final_rotation_layer: bool = False,
parameter_prefix: str = 'ΞΈ',
insert_barriers: bool = False,
initial_state: Optional[Any] = None,
) -> None:
"""Create a new ExcitationPreserving 2-local circuit.
Args:
num_qubits: The number of qubits of the ExcitationPreserving circuit.
mode: Choose the entangler mode, can be `'iswap'` or `'fsim'`.
reps: Specifies how often the structure of a rotation layer followed by an entanglement
layer is repeated.
entanglement: Specifies the entanglement structure. Can be a string ('full', 'linear'
or 'sca'), a list of integer-pairs specifying the indices of qubits
entangled with one another, or a callable returning such a list provided with
the index of the entanglement layer.
See the Examples section of :class:`~qiskit.circuit.library.TwoLocal` for more
detail.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
skip_unentangled_qubits: If True, the single qubit gates are only applied to qubits
that are entangled with another qubit. If False, the single qubit gates are applied
to each qubit in the Ansatz. Defaults to False.
skip_unentangled_qubits: If True, the single qubit gates are only applied to qubits
that are entangled with another qubit. If False, the single qubit gates are applied
to each qubit in the Ansatz. Defaults to False.
skip_final_rotation_layer: If True, a rotation layer is added at the end of the
ansatz. If False, no rotation layer is added. Defaults to True.
parameter_prefix: The parameterized gates require a parameter to be defined, for which
we use :class:`~qiskit.circuit.ParameterVector`.
insert_barriers: If True, barriers are inserted in between each layer. If False,
no barriers are inserted.
Raises:
ValueError: If the selected mode is not supported.
"""
supported_modes = ['iswap', 'fsim']
if mode not in supported_modes:
raise ValueError('Unsupported mode {}, choose one of {}'.format(mode, supported_modes))
theta = Parameter('ΞΈ')
swap = QuantumCircuit(2, name='Interaction')
swap.rxx(theta, 0, 1)
swap.ryy(theta, 0, 1)
if mode == 'fsim':
phi = Parameter('Ο')
swap.cp(phi, 0, 1)
super().__init__(num_qubits=num_qubits,
rotation_blocks=RZGate,
entanglement_blocks=swap,
entanglement=entanglement,
reps=reps,
skip_unentangled_qubits=skip_unentangled_qubits,
skip_final_rotation_layer=skip_final_rotation_layer,
parameter_prefix=parameter_prefix,
insert_barriers=insert_barriers,
initial_state=initial_state)
@property
def parameter_bounds(self) -> List[Tuple[float, float]]:
    """The search-space bounds of the trainable parameters.

    Returns:
        One ``(-pi, pi)`` interval per parameter in the ansatz.
    """
    # Every rotation angle is periodic, so (-pi, pi] covers the full range.
    return [(-pi, pi) for _ in range(self.num_parameters)]
|
gadial/qiskit-terra | test/python/circuit/library/test_grover_operator.py |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the grover operator."""
import unittest
import numpy as np
from qiskit.test.base import QiskitTestCase
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library import GroverOperator
from qiskit.converters import circuit_to_dag
from qiskit.quantum_info import Operator, Statevector, DensityMatrix
class TestGroverOperator(QiskitTestCase):
    """Test the Grover operator."""

    def assertGroverOperatorIsCorrect(self, grover_op, oracle, state_in=None, zero_reflection=None):
        """Assert that ``grover_op`` equals A R0 A-dagger O for the given pieces.

        Missing pieces default to the textbook choices: an equal-superposition
        state preparation and a reflection about the all-zero state.
        """
        oracle = Operator(oracle)

        if state_in is None:
            # Default state preparation: Hadamards on every qubit.
            state_in = QuantumCircuit(oracle.num_qubits)
            state_in.h(state_in.qubits)
        state_in = Operator(state_in)

        if zero_reflection is None:
            # Default zero reflection: flip the phase of |0...0> only.
            dim = 2 ** oracle.num_qubits
            zero_reflection = np.diag([-1.0] + [1.0] * (dim - 1))
        zero_reflection = Operator(zero_reflection)

        reference = state_in.dot(zero_reflection).dot(state_in.adjoint()).dot(oracle)
        self.assertTrue(Operator(grover_op).equiv(reference))

    def test_grover_operator(self):
        """Test the base case for the Grover operator."""
        with self.subTest('single Z oracle'):
            marker = QuantumCircuit(3)
            marker.z(2)  # good state if last qubit is 1
            self.assertGroverOperatorIsCorrect(GroverOperator(marker), marker)

        with self.subTest('target state x0x1'):
            marker = QuantumCircuit(4)
            marker.x(1)
            marker.z(1)
            marker.x(1)
            marker.z(3)
            self.assertGroverOperatorIsCorrect(GroverOperator(marker), marker)

    def test_quantum_info_input(self):
        """Test passing quantum_info.Operator and Statevector as input."""
        mark = Statevector.from_label('001')
        diffuse = 2 * DensityMatrix.from_label('000') - Operator.from_label('III')
        grover_op = GroverOperator(oracle=mark, zero_reflection=diffuse)
        self.assertGroverOperatorIsCorrect(grover_op,
                                           oracle=np.diag((-1) ** mark.data),
                                           zero_reflection=diffuse.data)

    def test_reflection_qubits(self):
        """Test setting idle qubits doesn't apply any operations on these qubits."""
        oracle = QuantumCircuit(4)
        oracle.z(3)
        dag = circuit_to_dag(GroverOperator(oracle, reflection_qubits=[0, 3]))
        # Qubits 1 and 2 are excluded from the reflection, so they must stay idle.
        self.assertEqual(set(dag.idle_wires()),
                         {dag.qubits[1], dag.qubits[2]})

    def test_custom_state_in(self):
        """Test passing a custom state_in operator."""
        oracle = QuantumCircuit(1)
        oracle.z(0)

        sampling_probability = 0.2
        bernoulli = QuantumCircuit(1)
        bernoulli.ry(2 * np.arcsin(np.sqrt(sampling_probability)), 0)

        self.assertGroverOperatorIsCorrect(GroverOperator(oracle, bernoulli), oracle, bernoulli)

    def test_custom_zero_reflection(self):
        """Test passing in a custom zero reflection."""
        oracle = QuantumCircuit(1)
        oracle.z(0)

        # X - RZ(pi) - X equals the reflection about |0> up to a global phase.
        zero_reflection = QuantumCircuit(1)
        zero_reflection.x(0)
        zero_reflection.rz(np.pi, 0)
        zero_reflection.x(0)

        grover_op = GroverOperator(oracle, zero_reflection=zero_reflection)

        with self.subTest('zero reflection up to phase works'):
            self.assertGroverOperatorIsCorrect(grover_op, oracle)

        with self.subTest('circuits match'):
            reference = QuantumCircuit(*grover_op.qregs, global_phase=np.pi)
            reference.compose(oracle, inplace=True)
            reference.h(0)  # state_in is H
            reference.compose(zero_reflection, inplace=True)
            reference.h(0)
            self.assertEqual(reference, grover_op)

    def test_num_mcx_ancillas(self):
        """Test the number of ancilla bits for the mcx gate in zero_reflection."""
        oracle = QuantumCircuit(7)
        oracle.x(6)
        oracle.h(6)
        oracle.ccx(0, 1, 4)
        oracle.ccx(2, 3, 5)
        oracle.ccx(4, 5, 6)
        oracle.h(6)
        oracle.x(6)
        grover_op = GroverOperator(oracle, reflection_qubits=[0, 1])
        # No additional ancillas should be allocated beyond the oracle's 7 qubits.
        self.assertEqual(grover_op.width(), 7)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
gadial/qiskit-terra | qiskit/transpiler/passes/__init__.py | <filename>qiskit/transpiler/passes/__init__.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===================================================
Transpiler Passes (:mod:`qiskit.transpiler.passes`)
===================================================
.. currentmodule:: qiskit.transpiler.passes
Layout Selection (Placement)
============================
.. autosummary::
:toctree: ../stubs/
SetLayout
TrivialLayout
DenseLayout
NoiseAdaptiveLayout
SabreLayout
CSPLayout
ApplyLayout
Layout2qDistance
EnlargeWithAncilla
FullAncillaAllocation
Routing
=======
.. autosummary::
:toctree: ../stubs/
BasicSwap
LayoutTransformation
LookaheadSwap
StochasticSwap
SabreSwap
Basis Change
============
.. autosummary::
:toctree: ../stubs/
Unroller
Unroll3qOrMore
Decompose
UnrollCustomDefinitions
BasisTranslator
Optimizations
=============
.. autosummary::
:toctree: ../stubs/
Optimize1qGates
Optimize1qGatesDecomposition
Collect2qBlocks
ConsolidateBlocks
CXCancellation
CommutationAnalysis
CommutativeCancellation
OptimizeSwapBeforeMeasure
RemoveDiagonalGatesBeforeMeasure
RemoveResetInZeroState
CrosstalkAdaptiveSchedule
HoareOptimizer
TemplateOptimization
Scheduling
=============
.. autosummary::
:toctree: ../stubs/
TimeUnitConversion
ALAPSchedule
ASAPSchedule
RZXCalibrationBuilder
Circuit Analysis
================
.. autosummary::
:toctree: ../stubs/
ResourceEstimation
Width
Depth
Size
CountOps
CountOpsLongestPath
NumTensorFactors
DAGLongestPath
Synthesis
=============
.. autosummary::
:toctree: ../stubs/
UnitarySynthesis
Additional Passes
=================
.. autosummary::
:toctree: ../stubs/
CheckMap
CheckCXDirection
CheckGateDirection
CXDirection
GateDirection
MergeAdjacentBarriers
BarrierBeforeFinalMeasurements
RemoveFinalMeasurements
DAGFixedPoint
FixedPoint
Error
"""
# layout selection (placement)
from .layout import SetLayout
from .layout import TrivialLayout
from .layout import DenseLayout
from .layout import NoiseAdaptiveLayout
from .layout import SabreLayout
from .layout import CSPLayout
from .layout import ApplyLayout
from .layout import Layout2qDistance
from .layout import EnlargeWithAncilla
from .layout import FullAncillaAllocation
# routing
from .routing import BasicSwap
from .routing import LayoutTransformation
from .routing import LookaheadSwap
from .routing import StochasticSwap
from .routing import SabreSwap
# basis change
from .basis import Decompose
from .basis import Unroller
from .basis import UnrollCustomDefinitions
from .basis import Unroll3qOrMore
from .basis import BasisTranslator
# optimization
from .optimization import Optimize1qGates
from .optimization import Optimize1qGatesDecomposition
from .optimization import Collect2qBlocks
from .optimization import ConsolidateBlocks
from .optimization import CommutationAnalysis
from .optimization import CommutativeCancellation
from .optimization import CXCancellation
from .optimization import OptimizeSwapBeforeMeasure
from .optimization import RemoveResetInZeroState
from .optimization import RemoveDiagonalGatesBeforeMeasure
from .optimization import CrosstalkAdaptiveSchedule
from .optimization import HoareOptimizer
from .optimization import TemplateOptimization
# circuit analysis
from .analysis import ResourceEstimation
from .analysis import Depth
from .analysis import Size
from .analysis import Width
from .analysis import CountOps
from .analysis import CountOpsLongestPath
from .analysis import NumTensorFactors
from .analysis import DAGLongestPath
# synthesis
from .synthesis import UnitarySynthesis
# circuit scheduling
from .scheduling import ALAPSchedule
from .scheduling import ASAPSchedule
from .scheduling import RZXCalibrationBuilder
from .scheduling import TimeUnitConversion
# additional utility passes
from .utils import CheckMap
from .utils import CheckCXDirection # Deprecated
from .utils import CXDirection # Deprecated
from .utils import CheckGateDirection
from .utils import GateDirection
from .utils import BarrierBeforeFinalMeasurements
from .utils import RemoveFinalMeasurements
from .utils import MergeAdjacentBarriers
from .utils import DAGFixedPoint
from .utils import FixedPoint
from .utils import Error
|
gadial/qiskit-terra | qiskit/circuit/classicalfunction/boolean_expression.py | <gh_stars>1-10
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A quantum oracle constructed from a logical expression or a string in the DIMACS format."""
from typing import Callable, Optional
from os.path import basename, isfile
from qiskit.circuit import QuantumCircuit
from qiskit.exceptions import MissingOptionalLibraryError
from .classical_element import ClassicalElement
from .utils import HAS_TWEEDLEDUM
class BooleanExpression(ClassicalElement):
    """The Boolean Expression gate.

    Wraps a classical logical expression string as a gate; synthesis of the
    expression into a circuit is delegated to the tweedledum library.
    """

    @staticmethod
    def _require_tweedledum():
        """Raise if the optional tweedledum dependency is not installed.

        Raises:
            MissingOptionalLibraryError: If tweedledum is not installed.
        """
        # Shared by __init__ and from_dimacs_file; previously duplicated inline.
        if not HAS_TWEEDLEDUM:
            raise MissingOptionalLibraryError(
                libname='tweedledum',
                name='BooleanExpression compiler',
                pip_install='pip install tweedledum')

    def __init__(self, expression: str, name: Optional[str] = None) -> None:
        """
        Args:
            expression: The logical expression string.
            name: Optional. Instruction gate name. Otherwise part of
                the expression is going to be used.

        Raises:
            MissingOptionalLibraryError: If tweedledum is not installed. Tweedledum is required.
        """
        self._require_tweedledum()
        from tweedledum import BoolFunction

        self._tweedledum_bool_expression = BoolFunction.from_expression(expression)

        # Long expressions are truncated to keep the default gate name short.
        short_expr_for_name = (expression[:10] + '...') if len(expression) > 13 else expression

        # One qubit per input plus one per output of the classical function.
        num_qubits = (self._tweedledum_bool_expression.num_outputs() +
                      self._tweedledum_bool_expression.num_inputs())
        super().__init__(name or short_expr_for_name, num_qubits=num_qubits, params=[])

    def simulate(self, bitstring: str) -> bool:
        """Evaluate the expression on a bitstring.

        This evaluation is done classically.

        Args:
            bitstring: The bitstring for which to evaluate.

        Returns:
            bool: result of the evaluation.
        """
        from tweedledum import BitVec

        bits = [BitVec(1, bit) for bit in bitstring]
        return bool(self._tweedledum_bool_expression.simulate(*bits))

    def synth(self, registerless: bool = True,
              synthesizer: Optional[Callable[["BooleanExpression"], QuantumCircuit]] = None):
        """Synthesize the logic network into a :class:`~qiskit.circuit.QuantumCircuit`.

        Args:
            registerless: Default ``True``. If ``False`` uses the parameter names
                to create registers with those names. Otherwise, creates a circuit with a flat
                quantum register.
            synthesizer: A callable that takes self and returns a Tweedledum
                circuit.

        Returns:
            QuantumCircuit: A circuit implementing the logic network.
        """
        if registerless:
            qregs = None
        else:
            qregs = None  # TODO: Probably from self._tweedledum_bool_expression._signature

        if synthesizer is None:
            # Default synthesis: PKRM synthesis on the truth table of output 0.
            from tweedledum.synthesis import pkrm_synth  # pylint: disable=no-name-in-module
            from .utils import tweedledum2qiskit
            truth_table = self._tweedledum_bool_expression.truth_table(output_bit=0)
            return tweedledum2qiskit(pkrm_synth(truth_table), name=self.name, qregs=qregs)
        return synthesizer(self)

    def _define(self):
        """The definition of the boolean expression is its synthesis."""
        self.definition = self.synth()

    @classmethod
    def from_dimacs_file(cls, filename: str):
        """Create a BooleanExpression from the string in the DIMACS format.

        Args:
            filename: A file in DIMACS format.

        Returns:
            BooleanExpression: A gate for the input string

        Raises:
            MissingOptionalLibraryError: If tweedledum is not installed. Tweedledum is required.
            FileNotFoundError: If filename is not found.
        """
        cls._require_tweedledum()
        from tweedledum import BoolFunction

        if not isfile(filename):
            raise FileNotFoundError('The file %s does not exist.' % filename)

        # Bypass __init__ since the function is built from a file, not an
        # expression string; initialize the Instruction base explicitly below.
        expr_obj = cls.__new__(cls)
        expr_obj._tweedledum_bool_expression = BoolFunction.from_dimacs_file(filename)

        num_qubits = (expr_obj._tweedledum_bool_expression.num_inputs() +
                      expr_obj._tweedledum_bool_expression.num_outputs())
        super(BooleanExpression, expr_obj).__init__(  # pylint: disable=no-value-for-parameter
            name=basename(filename), num_qubits=num_qubits, params=[])
        return expr_obj
|
gadial/qiskit-terra | test/python/compiler/test_compiler.py | <reponame>gadial/qiskit-terra<filename>test/python/compiler/test_compiler.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Compiler Test."""
import os
import unittest
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit import execute
from qiskit.circuit.library import U1Gate, U2Gate
from qiskit.compiler import transpile, assemble
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeRueschlikon, FakeTenerife
from qiskit.qobj import QasmQobj
class TestCompiler(QiskitTestCase):
    """Qiskit Compiler Tests."""

    def setUp(self):
        # Fixed seed so the statistical count comparisons below are reproducible.
        super().setUp()
        self.seed_simulator = 42
        self.backend = BasicAer.get_backend("qasm_simulator")

    def test_example_multiple_compile(self):
        """Test a toy example compiling multiple circuits.

        Pass if the results are correct.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        coupling_map = [[0, 1], [0, 2],
                        [1, 2],
                        [3, 2], [3, 4],
                        [4, 2]]
        qr = QuantumRegister(5)
        cr = ClassicalRegister(5)
        bell = QuantumCircuit(qr, cr)
        ghz = QuantumCircuit(qr, cr)
        # Create a GHZ state
        ghz.h(qr[0])
        for i in range(4):
            ghz.cx(qr[i], qr[i + 1])
        # Insert a barrier before measurement
        ghz.barrier()
        # Measure all of the qubits in the standard basis
        for i in range(5):
            ghz.measure(qr[i], cr[i])
        # Create a Bell state
        bell.h(qr[0])
        bell.cx(qr[0], qr[1])
        bell.barrier()
        bell.measure(qr[0], cr[0])
        bell.measure(qr[1], cr[1])
        shots = 2048
        bell_backend = transpile(bell, backend=backend)
        ghz_backend = transpile(ghz, backend=backend,
                                coupling_map=coupling_map)
        bell_qobj = assemble(bell_backend, shots=shots,
                             seed_simulator=10)
        ghz_qobj = assemble(ghz_backend, shots=shots,
                            seed_simulator=10)
        bell_result = backend.run(bell_qobj).result()
        ghz_result = backend.run(ghz_qobj).result()
        # Allow 5% statistical deviation from the ideal 50/50 split.
        threshold = 0.05 * shots
        counts_bell = bell_result.get_counts()
        target_bell = {'00000': shots / 2, '00011': shots / 2}
        self.assertDictAlmostEqual(counts_bell, target_bell, threshold)
        counts_ghz = ghz_result.get_counts()
        target_ghz = {'00000': shots / 2, '11111': shots / 2}
        self.assertDictAlmostEqual(counts_ghz, target_ghz, threshold)

    def test_compile_coupling_map(self):
        """Test compile_coupling_map.

        If all correct should return data with the same stats. The circuit may
        be different.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        qr = QuantumRegister(3, 'qr')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr, cr, name='qccccccc')
        qc.h(qr[0])
        qc.cx(qr[0], qr[1])
        qc.cx(qr[0], qr[2])
        qc.measure(qr[0], cr[0])
        qc.measure(qr[1], cr[1])
        qc.measure(qr[2], cr[2])
        shots = 2048
        coupling_map = [[0, 1], [1, 2]]
        initial_layout = [0, 1, 2]
        qc_b = transpile(qc, backend=backend,
                         coupling_map=coupling_map,
                         initial_layout=initial_layout)
        qobj = assemble(qc_b, shots=shots, seed_simulator=88)
        job = backend.run(qobj)
        result = job.result()
        qasm_to_check = qc.qasm()
        # NOTE(review): asserting the exact QASM length is brittle — any change
        # to register naming or QASM formatting will break it; verify intent.
        self.assertEqual(len(qasm_to_check), 173)
        counts = result.get_counts(qc)
        target = {'000': shots / 2, '111': shots / 2}
        threshold = 0.05 * shots
        self.assertDictAlmostEqual(counts, target, threshold)

    def test_example_swap_bits(self):
        """Test a toy example swapping a set bit around.

        Uses the mapper. Pass if results are correct.
        """
        backend = BasicAer.get_backend('qasm_simulator')
        coupling_map = [[0, 1], [0, 8], [1, 2], [1, 9], [2, 3], [2, 10],
                        [3, 4], [3, 11], [4, 5], [4, 12], [5, 6], [5, 13],
                        [6, 7], [6, 14], [7, 15], [8, 9], [9, 10], [10, 11],
                        [11, 12], [12, 13], [13, 14], [14, 15]]
        n = 3  # make this at least 3
        qr0 = QuantumRegister(n)
        qr1 = QuantumRegister(n)
        ans = ClassicalRegister(2 * n)
        qc = QuantumCircuit(qr0, qr1, ans)
        # Set the first bit of qr0
        qc.x(qr0[0])
        # Swap the set bit
        qc.swap(qr0[0], qr0[n - 1])
        qc.swap(qr0[n - 1], qr1[n - 1])
        qc.swap(qr1[n - 1], qr0[1])
        qc.swap(qr0[1], qr1[1])
        # Insert a barrier before measurement
        qc.barrier()
        # Measure all of the qubits in the standard basis
        for j in range(n):
            qc.measure(qr0[j], ans[j])
            qc.measure(qr1[j], ans[j + n])
        # First version: no mapping
        result = execute(qc, backend=backend,
                         coupling_map=None, shots=1024,
                         seed_simulator=14).result()
        self.assertEqual(result.get_counts(qc), {'010000': 1024})
        # Second version: map to coupling graph
        result = execute(qc, backend=backend,
                         coupling_map=coupling_map, shots=1024,
                         seed_simulator=14).result()
        self.assertEqual(result.get_counts(qc), {'010000': 1024})

    def test_parallel_compile(self):
        """Trigger parallel routines in compile.
        """
        backend = FakeRueschlikon()
        qr = QuantumRegister(16)
        cr = ClassicalRegister(2)
        qc = QuantumCircuit(qr, cr)
        qc.h(qr[0])
        for k in range(1, 15):
            qc.cx(qr[0], qr[k])
        qc.measure(qr[5], cr[0])
        # Ten copies of the same circuit — enough to exercise parallel transpile.
        qlist = [qc for k in range(10)]
        qobj = assemble(transpile(qlist, backend=backend))
        self.assertEqual(len(qobj.experiments), 10)

    def test_no_conflict_backend_passmanager(self):
        """execute(qc, backend=..., passmanager=...)
        See: https://github.com/Qiskit/qiskit-terra/issues/5037
        """
        backend = BasicAer.get_backend('qasm_simulator')
        qc = QuantumCircuit(2)
        qc.append(U1Gate(0), [0])
        qc.measure_all()
        job = execute(qc, backend=backend, pass_manager=PassManager())
        result = job.result().get_counts()
        self.assertEqual(result, {'00': 1024})

    def test_compile_single_qubit(self):
        """ Compile a single-qubit circuit in a non-trivial layout
        """
        qr = QuantumRegister(1, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr[0])
        # Place the single logical qubit on physical qubit 12.
        layout = {qr[0]: 12}
        cmap = [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4], [5, 6], [5, 9], [6, 8], [7, 8],
                [9, 8], [9, 10], [11, 3], [11, 10], [11, 12], [12, 2], [13, 1], [13, 12]]
        circuit2 = transpile(circuit, backend=None, coupling_map=cmap, basis_gates=['u2'],
                             initial_layout=layout)
        qobj = assemble(circuit2)
        compiled_instruction = qobj.experiments[0].instructions[0]
        self.assertEqual(compiled_instruction.name, 'u2')
        self.assertEqual(compiled_instruction.qubits, [12])
        self.assertEqual(compiled_instruction.params, [0, 3.141592653589793])

    def test_compile_pass_manager(self):
        """Test compile with and without an empty pass manager."""
        qr = QuantumRegister(2)
        cr = ClassicalRegister(2)
        qc = QuantumCircuit(qr, cr)
        qc.append(U1Gate(3.14), [qr[0]])
        qc.append(U2Gate(3.14, 1.57), [qr[0]])
        qc.barrier(qr)
        qc.measure(qr, cr)
        backend = BasicAer.get_backend('qasm_simulator')
        qrtrue = assemble(transpile(qc, backend, seed_transpiler=8),
                          seed_simulator=42)
        rtrue = backend.run(qrtrue).result()
        qrfalse = assemble(PassManager().run(qc), seed_simulator=42)
        rfalse = backend.run(qrfalse).result()
        # Transpiled and un-transpiled circuits must give identical counts.
        self.assertEqual(rtrue.get_counts(), rfalse.get_counts())

    def test_mapper_overoptimization(self):
        """Check mapper overoptimization.

        The mapper should not change the semantics of the input.
        An overoptimization introduced issue #81:
        https://github.com/Qiskit/qiskit-terra/issues/81
        """
        # -X-.-----
        # -Y-+-S-.-
        # -Z-.-T-+-
        # ---+-H---
        qr = QuantumRegister(4)
        cr = ClassicalRegister(4)
        circ = QuantumCircuit(qr, cr)
        circ.x(qr[0])
        circ.y(qr[1])
        circ.z(qr[2])
        circ.cx(qr[0], qr[1])
        circ.cx(qr[2], qr[3])
        circ.s(qr[1])
        circ.t(qr[2])
        circ.h(qr[3])
        circ.cx(qr[1], qr[2])
        circ.measure(qr[0], cr[0])
        circ.measure(qr[1], cr[1])
        circ.measure(qr[2], cr[2])
        circ.measure(qr[3], cr[3])
        coupling_map = [[0, 2], [1, 2], [2, 3]]
        shots = 1000
        # Run mapped and unmapped; the distributions must agree.
        result1 = execute(circ, backend=self.backend,
                          coupling_map=coupling_map,
                          seed_simulator=self.seed_simulator,
                          seed_transpiler=8,
                          shots=shots)
        count1 = result1.result().get_counts()
        result2 = execute(circ, backend=self.backend,
                          coupling_map=None,
                          seed_simulator=self.seed_simulator,
                          seed_transpiler=8, shots=shots)
        count2 = result2.result().get_counts()
        self.assertDictAlmostEqual(count1, count2, shots * 0.02)

    def test_grovers_circuit(self):
        """Testing a circuit originated in the Grover algorithm"""
        shots = 1000
        coupling_map = None
        # 6-qubit grovers
        qr = QuantumRegister(6)
        cr = ClassicalRegister(2)
        circuit = QuantumCircuit(qr, cr, name='grovers')
        circuit.h(qr[0])
        circuit.h(qr[1])
        circuit.x(qr[2])
        circuit.x(qr[3])
        circuit.x(qr[0])
        circuit.cx(qr[0], qr[2])
        circuit.x(qr[0])
        circuit.cx(qr[1], qr[3])
        circuit.ccx(qr[2], qr[3], qr[4])
        circuit.cx(qr[1], qr[3])
        circuit.x(qr[0])
        circuit.cx(qr[0], qr[2])
        circuit.x(qr[0])
        circuit.x(qr[1])
        circuit.x(qr[4])
        circuit.h(qr[4])
        circuit.ccx(qr[0], qr[1], qr[4])
        circuit.h(qr[4])
        circuit.x(qr[0])
        circuit.x(qr[1])
        circuit.x(qr[4])
        circuit.h(qr[0])
        circuit.h(qr[1])
        circuit.h(qr[4])
        circuit.barrier(qr)
        circuit.measure(qr[0], cr[0])
        circuit.measure(qr[1], cr[1])
        result = execute(circuit, backend=self.backend,
                         coupling_map=coupling_map,
                         seed_simulator=self.seed_simulator, shots=shots)
        counts = result.result().get_counts()
        # Expected distribution after one Grover iteration on this oracle.
        expected_probs = {'00': 0.64,
                          '01': 0.117,
                          '10': 0.113,
                          '11': 0.13}
        target = {key: shots * val for key, val in expected_probs.items()}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)

    def test_math_domain_error(self):
        """Check for floating point errors.

        The math library operates over floats and introduces floating point
        errors that should be avoided.
        See: https://github.com/Qiskit/qiskit-terra/issues/111
        """
        qr = QuantumRegister(4)
        cr = ClassicalRegister(4)
        circ = QuantumCircuit(qr, cr)
        circ.y(qr[0])
        circ.z(qr[2])
        circ.h(qr[2])
        circ.cx(qr[1], qr[0])
        circ.y(qr[2])
        circ.t(qr[2])
        circ.z(qr[2])
        circ.cx(qr[1], qr[2])
        circ.measure(qr[0], cr[0])
        circ.measure(qr[1], cr[1])
        circ.measure(qr[2], cr[2])
        circ.measure(qr[3], cr[3])
        coupling_map = [[0, 2], [1, 2], [2, 3]]
        shots = 2000
        job = execute(circ, backend=self.backend,
                      coupling_map=coupling_map,
                      seed_simulator=self.seed_simulator, shots=shots)
        counts = job.result().get_counts()
        target = {'0001': shots / 2, '0101': shots / 2}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)

    def test_random_parameter_circuit(self):
        """Run a circuit with randomly generated parameters."""
        # Load a pre-generated random 5-qubit, depth-5 circuit from the qasm fixtures.
        qasm_dir = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'qasm')
        circ = QuantumCircuit.from_qasm_file(
            os.path.join(qasm_dir, 'random_n5_d5.qasm'))
        coupling_map = [[0, 1], [1, 2], [2, 3], [3, 4]]
        shots = 1024
        qobj = execute(circ, backend=self.backend,
                       coupling_map=coupling_map, shots=shots,
                       seed_simulator=self.seed_simulator)
        counts = qobj.result().get_counts()
        # Reference probabilities computed once from the ideal statevector.
        expected_probs = {
            '00000': 0.079239867254200971,
            '00001': 0.032859032998526903,
            '00010': 0.10752610993531816,
            '00011': 0.018818532050952699,
            '00100': 0.054830807251011054,
            '00101': 0.0034141983951965164,
            '00110': 0.041649309748902276,
            '00111': 0.039967731207338125,
            '01000': 0.10516937819949743,
            '01001': 0.026635620063700002,
            '01010': 0.0053475143548793866,
            '01011': 0.01940513314416064,
            '01100': 0.0044028405481225047,
            '01101': 0.057524760052126644,
            '01110': 0.010795354134597078,
            '01111': 0.026491296821535528,
            '10000': 0.094827455395274859,
            '10001': 0.0008373965072688836,
            '10010': 0.029082297894094441,
            '10011': 0.012386622870598416,
            '10100': 0.018739140061148799,
            '10101': 0.01367656456536896,
            '10110': 0.039184170706009248,
            '10111': 0.062339335178438288,
            '11000': 0.00293674365989009,
            '11001': 0.012848433960739968,
            '11010': 0.018472497159499782,
            '11011': 0.0088903691234912003,
            '11100': 0.031305389080034329,
            '11101': 0.0004788556283690458,
            '11110': 0.002232419390471667,
            '11111': 0.017684822659235985
        }
        target = {key: shots * val for key, val in expected_probs.items()}
        threshold = 0.04 * shots
        self.assertDictAlmostEqual(counts, target, threshold)

    def test_yzy_zyz_cases(self):
        """yzy_to_zyz works in previously failed cases.

        See: https://github.com/Qiskit/qiskit-terra/issues/607
        """
        backend = FakeTenerife()
        qr = QuantumRegister(2)
        circ1 = QuantumCircuit(qr)
        circ1.cx(qr[0], qr[1])
        circ1.rz(0.7, qr[1])
        circ1.rx(1.570796, qr[1])
        qobj1 = assemble(transpile(circ1, backend))
        self.assertIsInstance(qobj1, QasmQobj)
        circ2 = QuantumCircuit(qr)
        circ2.y(qr[0])
        circ2.h(qr[0])
        circ2.s(qr[0])
        circ2.h(qr[0])
        qobj2 = assemble(transpile(circ2, backend))
        self.assertIsInstance(qobj2, QasmQobj)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
gadial/qiskit-terra | qiskit/circuit/library/standard_gates/rxx.py | <filename>qiskit/circuit/library/standard_gates/rxx.py<gh_stars>1-10
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Two-qubit XX-rotation gate."""
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class RXXGate(Gate):
    r"""A parametric 2-qubit :math:`X \otimes X` interaction (rotation about XX).

    This gate is symmetric, and is maximally entangling at :math:`\theta = \pi/2`.

    **Circuit Symbol:**

    .. parsed-literal::

             ┌─────────┐
        q_0: ┤1        ├
             │  Rxx(ϴ) │
        q_1: ┤0        ├
             └─────────┘

    **Matrix Representation:**

    .. math::

        \newcommand{\th}{\frac{\theta}{2}}

        R_{XX}(\theta) = exp(-i \th X{\otimes}X) =
            \begin{pmatrix}
                \cos(\th)   & 0           & 0           & -i\sin(\th) \\
                0           & \cos(\th)   & -i\sin(\th) & 0 \\
                0           & -i\sin(\th) & \cos(\th)   & 0 \\
                -i\sin(\th) & 0           & 0           & \cos(\th)
            \end{pmatrix}

    **Examples:**

        .. math::

            R_{XX}(\theta = 0) = I

        .. math::

            R_{XX}(\theta = \pi) = i X \otimes X

        .. math::

            R_{XX}(\theta = \frac{\pi}{2}) = \frac{1}{\sqrt{2}}
                                    \begin{pmatrix}
                                        1  & 0  & 0  & -i \\
                                        0  & 1  & -i & 0 \\
                                        0  & -i & 1  & 0 \\
                                        -i & 0  & 0  & 1
                                    \end{pmatrix}
    """

    def __init__(self, theta):
        """Create new RXX gate."""
        super().__init__('rxx', 2, [theta])

    def _define(self):
        """Calculate a subcircuit that implements this unitary."""
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        from .x import CXGate
        from .h import HGate
        from .rz import RZGate

        angle = self.params[0]
        register = QuantumRegister(2, 'q')
        circuit = QuantumCircuit(register, name=self.name)
        # Conjugate an RZ rotation by Hadamards and CNOTs to realize the
        # XX rotation: H.H - CX - RZ - CX - H.H.
        circuit._append(HGate(), [register[0]], [])
        circuit._append(HGate(), [register[1]], [])
        circuit._append(CXGate(), [register[0], register[1]], [])
        circuit._append(RZGate(angle), [register[1]], [])
        circuit._append(CXGate(), [register[0], register[1]], [])
        circuit._append(HGate(), [register[1]], [])
        circuit._append(HGate(), [register[0]], [])
        self.definition = circuit

    def inverse(self):
        """Return inverse RXX gate (i.e. with the negative rotation angle)."""
        return RXXGate(-self.params[0])

    def __array__(self, dtype=None):
        """Return a Numpy.array for the RXX gate."""
        import numpy
        half_theta = float(self.params[0]) / 2
        diag = numpy.cos(half_theta)
        off = 1j * numpy.sin(half_theta)
        return numpy.array([
            [diag, 0, 0, -off],
            [0, diag, -off, 0],
            [0, -off, diag, 0],
            [-off, 0, 0, diag]], dtype=dtype)
|
gadial/qiskit-terra | qiskit/opflow/evolutions/evolved_op.py | <filename>qiskit/opflow/evolutions/evolved_op.py<gh_stars>1-10
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" EvolutionOp Class """
from typing import List, Optional, Set, Union, cast
import numpy as np
import scipy
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.opflow.exceptions import OpflowError
from qiskit.opflow.list_ops.composed_op import ComposedOp
from qiskit.opflow.list_ops.list_op import ListOp
from qiskit.opflow.list_ops.summed_op import SummedOp
from qiskit.opflow.list_ops.tensored_op import TensoredOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.opflow.primitive_ops.matrix_op import MatrixOp
from qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp
from qiskit.quantum_info import Statevector
class EvolvedOp(PrimitiveOp):
r"""
Class for wrapping Operator Evolutions for compilation (``convert``) by an EvolutionBase
method later, essentially acting as a placeholder. Note that EvolvedOp is a weird case of
PrimitiveOp. It happens to be that it fits into the PrimitiveOp interface nearly perfectly,
and it essentially represents a placeholder for a PrimitiveOp later, even though it doesn't
actually hold a primitive object. We could have chosen for it to be an OperatorBase,
but would have ended up copying and pasting a lot of code from PrimitiveOp."""
primitive: PrimitiveOp
def __init__(self,
             primitive: OperatorBase,
             coeff: Union[complex, ParameterExpression] = 1.0) -> None:
    """
    Args:
        primitive: The operator being wrapped to signify evolution later.
        coeff: A coefficient multiplying the operator.
    """
    super().__init__(primitive, coeff=coeff)
def primitive_strings(self) -> Set[str]:
    """Delegate to the wrapped primitive's set of primitive-type strings."""
    return self.primitive.primitive_strings()
@property
def num_qubits(self) -> int:
    """Number of qubits of the wrapped primitive."""
    return self.primitive.num_qubits
def add(self, other: OperatorBase) -> Union["EvolvedOp", SummedOp]:
    """Return the operator sum of ``self`` and ``other``.

    Two EvolvedOps over the same primitive merge into one with summed
    coefficients; otherwise the result is a SummedOp.
    """
    if self.num_qubits != other.num_qubits:
        raise ValueError(
            'Sum over operators with different numbers of qubits, {} and {}, is not well '
            'defined'.format(self.num_qubits, other.num_qubits))

    if isinstance(other, EvolvedOp) and self.primitive == other.primitive:
        # Same evolution — just accumulate the coefficients.
        return EvolvedOp(self.primitive, coeff=self.coeff + other.coeff)

    if isinstance(other, SummedOp):
        # Flatten into the existing sum rather than nesting.
        return SummedOp([cast(OperatorBase, self)] + other.oplist)
    return SummedOp([self, other])
def adjoint(self) -> "EvolvedOp":
    """Adjoint of exp(-i H): negate the (adjointed) generator, conjugate the coefficient."""
    negated_primitive = self.primitive.adjoint() * -1
    return EvolvedOp(negated_primitive, coeff=self.coeff.conjugate())
def equals(self, other: OperatorBase) -> bool:
    """Equality check: same type, same coefficient, same wrapped primitive."""
    return (isinstance(other, EvolvedOp)
            and self.coeff == other.coeff
            and self.primitive == other.primitive)
def tensor(self, other: OperatorBase) -> TensoredOp:
    """Tensor product with ``other``, flattening into an existing TensoredOp."""
    if isinstance(other, TensoredOp):
        return TensoredOp([cast(OperatorBase, self), *other.oplist])
    return TensoredOp([self, other])
def _expand_dim(self, num_qubits: int) -> TensoredOp:
    """Pad this operator with ``num_qubits`` identity qubits via a tensor product."""
    # pylint: disable=cyclic-import
    from ..operator_globals import I
    identity_pad = I ^ num_qubits
    return self.tensor(identity_pad)
def permute(self, permutation: List[int]) -> "EvolvedOp":
    """Return a new EvolvedOp whose primitive has its qubits permuted."""
    return EvolvedOp(self.primitive.permute(permutation), coeff=self.coeff)
def compose(self, other: OperatorBase,
            permutation: Optional[List[int]] = None, front: bool = False) -> OperatorBase:
    """Operator composition with ``other`` (``front=True`` applies ``other`` first)."""
    new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
    if front:
        return other.compose(new_self)
    # Flatten into an existing ComposedOp instead of nesting one inside another.
    oplist = [new_self] + other.oplist if isinstance(other, ComposedOp) else [new_self, other]
    return ComposedOp(oplist)
def __str__(self) -> str:
    """Render as ``e^(-i*P)``, prefixed by the coefficient when it is not 1."""
    prim_str = str(self.primitive)
    if self.coeff == 1.0:
        return 'e^(-i*{})'.format(prim_str)
    return "{} * e^(-i*{})".format(self.coeff, prim_str)
def __repr__(self) -> str:
    """Unambiguous representation showing the primitive's repr and the coeff."""
    return "EvolvedOp({}, coeff={})".format(repr(self.primitive), self.coeff)
def reduce(self) -> "EvolvedOp":
    """Return a new EvolvedOp wrapping the reduced primitive (coeff unchanged)."""
    reduced_primitive = self.primitive.reduce()
    return EvolvedOp(reduced_primitive, coeff=self.coeff)
def assign_parameters(self, param_dict: dict) -> Union["EvolvedOp", ListOp]:
    """Bind parameters in the coefficient and the wrapped primitive.

    Args:
        param_dict: Mapping from parameters to replacement values. May unroll
            into a list of bindings, in which case a ``ListOp`` of the
            individually bound operators is returned.

    Returns:
        The bound ``EvolvedOp``, or a ``ListOp`` of bound operators when the
        unrolled dict describes multiple parameterizations.
    """
    param_value = self.coeff
    if isinstance(self.coeff, ParameterExpression):
        unrolled_dict = self._unroll_param_dict(param_dict)
        if isinstance(unrolled_dict, list):
            # Multiple parameterizations: bind each one separately and collect.
            return ListOp([self.assign_parameters(param_dict) for param_dict in unrolled_dict])
        if self.coeff.parameters <= set(unrolled_dict.keys()):
            # All of the coefficient's parameters are covered -> fully bind it.
            binds = {param: unrolled_dict[param] for param in self.coeff.parameters}
            # NOTE(review): float() will raise for complex-valued bindings —
            # presumably only real coefficients are expected here; confirm.
            param_value = float(self.coeff.bind(binds))
    return EvolvedOp(self.primitive.bind_parameters(param_dict), coeff=param_value)
def eval(
    self, front: Optional[Union[str, dict, np.ndarray, OperatorBase, Statevector]] = None
) -> Union[OperatorBase, complex]:
    """Evaluate by first converting to a dense ``MatrixOp`` (exponential cost)."""
    return cast(Union[OperatorBase, complex], self.to_matrix_op().eval(front=front))
def to_matrix(self, massive: bool = False) -> np.ndarray:
    """Return the dense matrix ``expm(-i * primitive) * coeff``.

    For a plain ``ListOp`` primitive (exact type, not a subclass), each list
    element is exponentiated separately and an array of matrices is returned.

    Args:
        massive: Consent flag forwarded when building large matrices.

    Returns:
        The evolved operator as a complex NumPy array (or array of matrices).
    """
    if (
        isinstance(self.primitive, ListOp)
        and self.primitive.__class__.__name__ == ListOp.__name__
    ):
        # Exact ListOp (subclasses like SummedOp excluded): evolve element-wise,
        # scaling each matrix by both the list's coeff and this op's coeff.
        return np.array([
            op.exp_i().to_matrix(massive=massive)
            * self.primitive.coeff
            * self.coeff
            for op in self.primitive.oplist
        ], dtype=complex)
    # exp(-iH): SciPy computes the dense matrix exponential.
    prim_mat = -1.j * self.primitive.to_matrix()
    return scipy.linalg.expm(prim_mat) * self.coeff
def to_matrix_op(self, massive: bool = False) -> Union[ListOp, MatrixOp]:
    """Return a ``MatrixOp`` (or ``ListOp`` of them) equivalent to this operator."""
    prim = self.primitive
    if isinstance(prim, ListOp) and prim.__class__.__name__ == ListOp.__name__:
        # Exact ListOp primitive: evolve each element and keep the list shape.
        matrix_ops = [op.exp_i().to_matrix_op() for op in prim.oplist]
        return ListOp(matrix_ops, coeff=prim.coeff * self.coeff)
    prim_mat = EvolvedOp(prim).to_matrix(massive=massive)
    return MatrixOp(prim_mat, coeff=self.coeff)
def log_i(self, massive: bool = False) -> OperatorBase:
    """Return ``i * log(self)`` — i.e. ``primitive * coeff`` — undoing the evolution wrapper."""
    return self.primitive * self.coeff
# pylint: disable=arguments-differ
def to_instruction(self, massive: bool = False) -> Instruction:
    """Convert to a circuit ``Instruction`` via the primitive's dense matrix form."""
    mat_op = self.primitive.to_matrix_op(massive=massive)
    if isinstance(mat_op, MatrixOp):
        return mat_op.to_instruction()
    # Non-matrix result (e.g. a ListOp primitive) cannot become one instruction.
    raise OpflowError("to_instruction is not allowed for ListOp.")
|
gadial/qiskit-terra | qiskit/circuit/library/standard_gates/ecr.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Two-qubit ZX-rotation gate."""
import numpy as np
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
from .rzx import RZXGate
from .x import XGate
class ECRGate(Gate):
    r"""An echoed RZX(pi/2) gate implemented using RZX(pi/4) and RZX(-pi/4).

    This gate is maximally entangling and is equivalent to a CNOT up to
    single-qubit pre-rotations. The echoing procedure mitigates some
    unwanted terms (terms other than ZX) to cancel in an experiment.

    **Circuit Symbol:**

    .. parsed-literal::

             ┌─────────┐            ┌────────────┐┌────────┐┌─────────────┐
        q_0: ┤0        ├       q_0: ┤0           ├┤ RX(pi) ├┤0            ├
             │   ECR   │   =        │ RZX(pi/4)  │└────────┘│ RZX(-pi/4)  │
        q_1: ┤1        ├       q_1: ┤1           ├──────────┤1            ├
             └─────────┘            └────────────┘          └─────────────┘

    **Matrix Representation:**

    .. math::

        ECR\ q_0, q_1 = \frac{1}{\sqrt{2}}
            \begin{pmatrix}
                0  & 1  & 0  & i \\
                1  & 0  & -i & 0 \\
                0  & i  & 0  & 1 \\
                -i & 0  & 1  & 0
            \end{pmatrix}

    .. note::

        In Qiskit's convention, higher qubit indices are more significant
        (little endian convention). In the above example we apply the gate
        on (q_0, q_1) which results in the :math:`X \otimes Z` tensor order.
        Instead, if we apply it on (q_1, q_0), the matrix will
        be :math:`Z \otimes X`:

        .. parsed-literal::

                 ┌─────────┐
            q_0: ┤1        ├
                 │   ECR   │
            q_1: ┤0        ├
                 └─────────┘

        .. math::

            ECR\ q_0, q_1 = \frac{1}{\sqrt{2}}
                \begin{pmatrix}
                    0  & 0  & 1  & i \\
                    0  & 0  & i  & 1 \\
                    1  & -i & 0  & 0 \\
                    -i & 1  & 0  & 0
                \end{pmatrix}
    """

    def __init__(self):
        """Create new ECR gate."""
        super().__init__('ecr', 2, [])

    def _define(self):
        """
        gate ecr a, b { rzx(pi/4) a, b; x a; rzx(-pi/4) a, b;}
        """
        # pylint: disable=cyclic-import
        from qiskit.circuit.quantumcircuit import QuantumCircuit
        q = QuantumRegister(2, 'q')
        qc = QuantumCircuit(q, name=self.name)
        # Echo sequence RZX(pi/4) . X . RZX(-pi/4): the X flip reverses the
        # sign of the unwanted (non-ZX) terms so they cancel between halves.
        rules = [
            (RZXGate(np.pi/4), [q[0], q[1]], []),
            (XGate(), [q[0]], []),
            (RZXGate(-np.pi/4), [q[0], q[1]], [])
        ]
        for instr, qargs, cargs in rules:
            qc._append(instr, qargs, cargs)
        self.definition = qc

    def to_matrix(self):
        """Return a numpy.array for the ECR gate."""
        return 1/np.sqrt(2) * \
            np.array([[0, 1, 0, 1.j],
                      [1, 0, -1.j, 0],
                      [0, 1.j, 0, 1],
                      [-1.j, 0, 1, 0]], dtype=complex)
|
gadial/qiskit-terra | qiskit/algorithms/linear_solvers/linear_solver.py | <filename>qiskit/algorithms/linear_solvers/linear_solver.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An abstract class for linear systems solvers."""
from abc import ABC, abstractmethod
from typing import Union, Optional, List, Callable
import numpy as np
from qiskit import QuantumCircuit
from qiskit.result import Result
from qiskit.quantum_info.operators.base_operator import BaseOperator
from .observables.linear_system_observable import LinearSystemObservable
from ..algorithm_result import AlgorithmResult
class LinearSolverResult(AlgorithmResult):
    """A base class for linear systems results.

    Linear-systems algorithms return a ``LinearSystemsResult`` object carrying
    the information about the solution that was obtained.
    """

    def __init__(self) -> None:
        super().__init__()
        # All fields default to None; an algorithm that can compute a field
        # overrides it after solving.
        self._state = None
        self._observable = None
        self._euclidean_norm = None
        self._circuit_results = None

    @property
    def observable(self) -> Union[float, List[float]]:
        """The calculated observable value(s), if any."""
        return self._observable

    @observable.setter
    def observable(self, value: Union[float, List[float]]) -> None:
        """Store the new observable value(s).

        Args:
            value: The new value(s) of the observable(s).
        """
        self._observable = value

    @property
    def state(self) -> Union[QuantumCircuit, np.ndarray]:
        """The solution: either a circuit that prepares it or an explicit vector."""
        return self._state

    @state.setter
    def state(self, value: Union[QuantumCircuit, np.ndarray]) -> None:
        """Store the solution state (preparing circuit or vector).

        Args:
            value: The new solution state.
        """
        self._state = value

    @property
    def euclidean_norm(self) -> float:
        """The Euclidean norm of the solution, if the algorithm computed it."""
        return self._euclidean_norm

    @euclidean_norm.setter
    def euclidean_norm(self, value: float) -> None:
        """Store the Euclidean norm of the solution.

        Args:
            value: The new Euclidean norm of the solution.
        """
        self._euclidean_norm = value

    @property
    def circuit_results(self) -> Union[List[float], List[Result]]:
        """The raw results obtained from running the circuits."""
        return self._circuit_results

    @circuit_results.setter
    def circuit_results(self, value: Union[List[float], List[Result]]):
        """Store the raw results obtained from the circuits."""
        self._circuit_results = value
class LinearSolver(ABC):
    """An abstract class for linear system solvers in Qiskit."""

    @abstractmethod
    def solve(self, matrix: Union[np.ndarray, QuantumCircuit],
              vector: Union[np.ndarray, QuantumCircuit],
              observable: Optional[Union[LinearSystemObservable, BaseOperator,
                                         List[LinearSystemObservable], List[BaseOperator]]] = None,
              observable_circuit: Optional[Union[QuantumCircuit, List[QuantumCircuit]]] = None,
              post_processing: Optional[Callable[[Union[float, List[float]]],
                                                 Union[float, List[float]]]] = None) \
            -> LinearSolverResult:
        """Solve the system ``Ax = b`` and compute the requested observable(s).

        Args:
            matrix: The matrix specifying the system, i.e. A in Ax=b.
            vector: The right-hand side of the equation, i.e. b in Ax=b.
            observable: Optional information to extract from the solution;
                defaults to the probability of success of the algorithm.
            observable_circuit: Optional circuit applied to the solution to
                extract information. Default is ``None``.
            post_processing: Optional function computing the observable's value
                from the raw measurement. Default is the raw measured value.

        Returns:
            The result of the linear system.
        """
        raise NotImplementedError
|
gadial/qiskit-terra | qiskit/quantum_info/operators/mixins/group.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Mixin for gate operator interface.
"""
from abc import ABC, abstractmethod
from numbers import Integral
from qiskit.exceptions import QiskitError
from qiskit.utils.deprecation import deprecate_function
class GroupMixin(ABC):
    """Abstract Mixin for operator group operations.

    This class defines the following methods

        - :meth:`compose`
        - :meth:`dot`
        - :meth:`tensor`
        - :meth:`expand`
        - :meth:`power`

    And the following operator overloads:

        - ``&``, ``__and__`` -> :meth:`compose`
        - ``^``, ``__xor__`` -> :meth:`tensor`
        - ``**``, ``__pow__`` -> :meth:`power`

    The following deprecated overloads are also defined:

        - ``*``, ``__mul__`` -> :meth:`dot`
        - ``@``, ``__matmul__`` -> :meth:`compose`

    The following abstract methods must be implemented by subclasses
    using this mixin

        - ``compose(self, other, qargs=None, inplace=False)``
        - ``tensor(self, other)``
        - ``expand(self, other)``
    """

    @deprecate_function(
        'Using the `__mul__` operator `A * B` as shorthand for'
        ' `A.dot(B)` is deprecated as of version 0.17.0 and will be '
        ' removed no earlier than 3 months after the release date.'
        ' As an alternative, use the compose operator `B & A`'
        ' in place of `A * B` as a replacement.')
    def __mul__(self, other):
        # Deprecated alias: A * B == A.dot(B).
        return self.dot(other)

    @deprecate_function(
        'Using the `__matmul__` operator `A @ B` as shorthand for'
        ' `A.compose(B)` is deprecated as of version 0.17.0 and will be '
        ' removed no earlier than 3 months after the release date.'
        ' Use the `A & B` instead.')
    def __matmul__(self, other):
        # Deprecated alias: A @ B == A.compose(B).
        return self.compose(other)

    def __and__(self, other):
        # A & B == A.compose(B).
        return self.compose(other)

    def __pow__(self, n):
        # A ** n == A.power(n).
        return self.power(n)

    def __xor__(self, other):
        # A ^ B == A.tensor(B).
        return self.tensor(other)

    @abstractmethod
    def tensor(self, other):
        r"""Return the tensor product with another CLASS.

        Args:
            other (CLASS): a CLASS object.

        Returns:
            CLASS: the tensor product :math:`a \otimes b`, where :math:`a`
            is the current CLASS, and :math:`b` is the other CLASS.

        .. note::
            The tensor product can be obtained using the ``^`` binary operator.
            Hence ``a.tensor(b)`` is equivalent to ``a ^ b``.

        .. note:
            Tensor uses reversed operator ordering to :meth:`expand`.
            For two operators of the same type ``a.tensor(b) = b.expand(a)``.
        """

    @abstractmethod
    def expand(self, other):
        r"""Return the reverse-order tensor product with another CLASS.

        Args:
            other (CLASS): a CLASS object.

        Returns:
            CLASS: the tensor product :math:`b \otimes a`, where :math:`a`
            is the current CLASS, and :math:`b` is the other CLASS.

        .. note:
            Expand is the opposite operator ordering to :meth:`tensor`.
            For two operators of the same type ``a.expand(b) = b.tensor(a)``.
        """

    @abstractmethod
    def compose(self, other, qargs=None, front=False):
        """Return the operator composition with another CLASS.

        Args:
            other (CLASS): a CLASS object.
            qargs (list or None): Optional, a list of subsystem positions to
                                  apply other on. If None apply on all
                                  subsystems (default: None).
            front (bool): If True compose using right operator multiplication,
                          instead of left multiplication [default: False].

        Returns:
            CLASS: The composed CLASS.

        Raises:
            QiskitError: if other cannot be converted to an operator, or has
                         incompatible dimensions for specified subsystems.

        .. note::
            Composition (``&``) by default is defined as `left` matrix multiplication for
            matrix operators, while :meth:`dot` is defined as `right` matrix
            multiplication. That is that ``A & B == A.compose(B)`` is equivalent to
            ``B.dot(A)`` when ``A`` and ``B`` are of the same type.

            Setting the ``front=True`` kwarg changes this to `right` matrix
            multiplication and is equivalent to the :meth:`dot` method
            ``A.dot(B) == A.compose(B, front=True)``.
        """

    def dot(self, other, qargs=None):
        """Return the right multiplied operator self * other.

        Args:
            other (CLASS): an operator object.
            qargs (list or None): Optional, a list of subsystem positions to
                                  apply other on. If None apply on all
                                  subsystems (default: None).

        Returns:
            CLASS: The right matrix multiplied CLASS.
        """
        return self.compose(other, qargs=qargs, front=True)

    def power(self, n):
        """Return the compose of a operator with itself n times.

        Args:
            n (int): the number of times to compose with self (n>0).

        Returns:
            CLASS: the n-times composed operator.

        Raises:
            QiskitError: if the input and output dimensions of the operator
                         are not equal, or the power is not a positive integer.
        """
        # NOTE: if a subclass can have negative or non-integer powers
        # this method should be overridden in that class.
        if not isinstance(n, Integral) or n < 1:
            raise QiskitError("Can only power with positive integer powers.")
        # Repeated right-multiplication: self composed with itself n times.
        ret = self
        for _ in range(1, n):
            ret = ret.dot(self)
        return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.