repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
liqi328/rjrepaircompany
refs/heads/master
django/contrib/comments/managers.py
417
from django.db import models from django.contrib.contenttypes.models import ContentType from django.utils.encoding import force_unicode class CommentManager(models.Manager): def in_moderation(self): """ QuerySet for all comments currently in the moderation queue. """ return self.get_query_set().filter(is_public=False, is_removed=False) def for_model(self, model): """ QuerySet for all comments for a particular model (either an instance or a class). """ ct = ContentType.objects.get_for_model(model) qs = self.get_query_set().filter(content_type=ct) if isinstance(model, models.Model): qs = qs.filter(object_pk=force_unicode(model._get_pk_val())) return qs
miguelinux/vbox
refs/heads/master
src/VBox/ValidationKit/testmanager/batch/virtual_test_sheriff.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- # $Id: virtual_test_sheriff.py $ # pylint: disable=C0301 """ Virtual Test Sheriff. Duties: - Try to a assign failure reasons to recently failed tests. - Reboot or disable bad test boxes. """ __copyright__ = \ """ Copyright (C) 2012-2016 Oracle Corporation This file is part of VirtualBox Open Source Edition (OSE), as available from http://www.virtualbox.org. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License (GPL) as published by the Free Software Foundation, in version 2 as it comes in the "COPYING" file of the VirtualBox OSE distribution. VirtualBox OSE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. The contents of this file may alternatively be used under the terms of the Common Development and Distribution License Version 1.0 (CDDL) only, as it comes in the "COPYING.CDDL" file of the VirtualBox OSE distribution, in which case the provisions of the CDDL are applicable instead of those of the GPL. You may elect to license modified versions of this file under the terms and conditions of either the GPL or the CDDL or both. 
""" __version__ = "$Revision: 108794 $" # Standard python imports import sys; import os; import hashlib; import StringIO; from optparse import OptionParser; from PIL import Image; # pylint: disable=import-error # Add Test Manager's modules path g_ksTestManagerDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))); sys.path.append(g_ksTestManagerDir); # Test Manager imports from testmanager.core.db import TMDatabaseConnection; from testmanager.core.build import BuildDataEx; from testmanager.core.failurereason import FailureReasonLogic; from testmanager.core.testbox import TestBoxLogic, TestBoxData; from testmanager.core.testcase import TestCaseDataEx; from testmanager.core.testgroup import TestGroupData; from testmanager.core.testset import TestSetLogic, TestSetData; from testmanager.core.testresults import TestResultLogic, TestResultFileData; from testmanager.core.testresultfailures import TestResultFailureLogic, TestResultFailureData; from testmanager.core.useraccount import UserAccountLogic; class VirtualTestSheriffCaseFile(object): """ A failure investigation case file. """ ## Max log file we'll read into memory. (256 MB) kcbMaxLogRead = 0x10000000; def __init__(self, oSheriff, oTestSet, oTree, oBuild, oTestBox, oTestGroup, oTestCase): self.oSheriff = oSheriff; self.oTestSet = oTestSet; # TestSetData self.oTree = oTree; # TestResultDataEx self.oBuild = oBuild; # BuildDataEx self.oTestBox = oTestBox; # TestBoxData self.oTestGroup = oTestGroup; # TestGroupData self.oTestCase = oTestCase; # TestCaseDataEx self.sMainLog = ''; # The main log file. Empty string if not accessible. # Generate a case file name. 
self.sName = '#%u: %s' % (self.oTestSet.idTestSet, self.oTestCase.sName,) self.sLongName = '#%u: "%s" on "%s" running %s %s (%s), "%s" by %s, using %s %s %s r%u' \ % ( self.oTestSet.idTestSet, self.oTestCase.sName, self.oTestBox.sName, self.oTestBox.sOs, self.oTestBox.sOsVersion, self.oTestBox.sCpuArch, self.oTestBox.sCpuName, self.oTestBox.sCpuVendor, self.oBuild.oCat.sProduct, self.oBuild.oCat.sBranch, self.oBuild.oCat.sType, self.oBuild.iRevision, ); # Investigation notes. self.tReason = None; # None or one of the ktReason_XXX constants. self.dReasonForResultId = {}; # Reason assignments indexed by idTestResult. self.dCommentForResultId = {}; # Comment assignments indexed by idTestResult. # # Reason. # def noteReason(self, tReason): """ Notes down a possible reason. """ self.oSheriff.dprint(u'noteReason: %s -> %s' % (self.tReason, tReason,)); self.tReason = tReason; return True; def noteReasonForId(self, tReason, idTestResult, sComment = None): """ Notes down a possible reason for a specific test result. """ self.oSheriff.dprint(u'noteReasonForId: %u: %s -> %s%s' % (idTestResult, self.dReasonForResultId.get(idTestResult, None), tReason, (u' (%s)' % (sComment,)) if sComment is not None else '')); self.dReasonForResultId[idTestResult] = tReason; if sComment is not None: self.dCommentForResultId[idTestResult] = sComment; return True; # # Test classification. # def isVBoxTest(self): """ Test classification: VirtualBox (using the build) """ return self.oBuild.oCat.sProduct.lower() in [ 'virtualbox', 'vbox' ]; def isVBoxUnitTest(self): """ Test case classification: The unit test doing all our testcase/*.cpp stuff. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower() == 'unit tests'; def isVBoxInstallTest(self): """ Test case classification: VirtualBox Guest installation test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('install:'); def isVBoxUSBTest(self): """ Test case classification: VirtualBox USB test. 
""" return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('usb:'); def isVBoxStorageTest(self): """ Test case classification: VirtualBox Storage test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('storage:'); def isVBoxGAsTest(self): """ Test case classification: VirtualBox Guest Additions test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('ga\'s tests'); def isVBoxAPITest(self): """ Test case classification: VirtualBox API test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('api:'); def isVBoxBenchmarkTest(self): """ Test case classification: VirtualBox Benchmark test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('benchmark:'); def isVBoxSmokeTest(self): """ Test case classification: Smoke test. """ return self.isVBoxTest() \ and self.oTestCase.sName.lower().startswith('smoketest'); # # Utility methods. # def getMainLog(self): """ Tries to reads the main log file since this will be the first source of information. """ if len(self.sMainLog) > 0: return self.sMainLog; (oFile, oSizeOrError, _) = self.oTestSet.openFile('main.log', 'rb'); if oFile is not None: try: self.sMainLog = oFile.read(min(self.kcbMaxLogRead, oSizeOrError)).decode('utf-8', 'replace'); except Exception as oXcpt: self.oSheriff.vprint(u'Error reading main log file: %s' % (oXcpt,)) self.sMainLog = ''; else: self.oSheriff.vprint(u'Error opening main log file: %s' % (oSizeOrError,)); return self.sMainLog; def getLogFile(self, oFile): """ Tries to reads the given file as a utf-8 log file. oFile is a TestFileDataEx instance. Returns empty string if problems opening or reading the file. 
""" sContent = ''; (oFile, oSizeOrError, _) = self.oTestSet.openFile(oFile.sFile, 'rb'); if oFile is not None: try: sContent = oFile.read(min(self.kcbMaxLogRead, oSizeOrError)).decode('utf-8', 'replace'); except Exception as oXcpt: self.oSheriff.vprint(u'Error reading the "%s" log file: %s' % (oFile.sFile, oXcpt,)) else: self.oSheriff.vprint(u'Error opening the "%s" log file: %s' % (oFile.sFile, oSizeOrError,)); return sContent; def getScreenshotSha256(self, oFile): """ Tries to read the given screenshot file, uncompress it, and do SHA-2 on the raw pixels. Returns SHA-2 digest string on success, None on failure. """ (oFile, _, _) = self.oTestSet.openFile(oFile.sFile, 'rb'); try: abImageFile = oFile.read(); except Exception as oXcpt: self.oSheriff.vprint(u'Error reading the "%s" image file: %s' % (oFile.sFile, oXcpt,)) else: try: oImage = Image.open(StringIO.StringIO(abImageFile)); except Exception as oXcpt: self.oSheriff.vprint(u'Error opening the "%s" image bytes using PIL.Image.open: %s' % (oFile.sFile, oXcpt,)) else: try: oHash = hashlib.sha256(); oHash.update(oImage.tostring()); except Exception as oXcpt: self.oSheriff.vprint(u'Error hashing the uncompressed image bytes for "%s": %s' % (oFile.sFile, oXcpt,)) else: return oHash.hexdigest(); return None; def isSingleTestFailure(self): """ Figure out if this is a single test failing or if it's one of the more complicated ones. """ if self.oTree.cErrors == 1: return True; if self.oTree.deepCountErrorContributers() <= 1: return True; return False; class VirtualTestSheriff(object): # pylint: disable=R0903 """ Add build info into Test Manager database. """ ## The user account for the virtual sheriff. ksLoginName = 'vsheriff'; def __init__(self): """ Parse command line. 
""" self.oDb = None; self.tsNow = None; self.oTestResultLogic = None; self.oTestSetLogic = None; self.oFailureReasonLogic = None; # FailureReasonLogic; self.oTestResultFailureLogic = None; # TestResultFailureLogic self.oLogin = None; self.uidSelf = -1; self.oLogFile = None; self.asBsodReasons = []; self.asUnitTestReasons = []; oParser = OptionParser(); oParser.add_option('--start-hours-ago', dest = 'cStartHoursAgo', metavar = '<hours>', default = 0, type = 'int', help = 'When to start specified as hours relative to current time. Defauls is right now.', ); oParser.add_option('--hours-period', dest = 'cHoursBack', metavar = '<period-in-hours>', default = 2, type = 'int', help = 'Work period specified in hours. Defauls is 2 hours.'); oParser.add_option('--real-run-back', dest = 'fRealRun', action = 'store_true', default = False, help = 'Whether to commit the findings to the database. Default is a dry run.'); oParser.add_option('-q', '--quiet', dest = 'fQuiet', action = 'store_true', default = False, help = 'Quiet execution'); oParser.add_option('-l', '--log', dest = 'sLogFile', metavar = '<logfile>', default = None, help = 'Where to log messages.'); oParser.add_option('--debug', dest = 'fDebug', action = 'store_true', default = False, help = 'Enables debug mode.'); (self.oConfig, _) = oParser.parse_args(); if self.oConfig.sLogFile is not None and len(self.oConfig.sLogFile) > 0: self.oLogFile = open(self.oConfig.sLogFile, "a"); self.oLogFile.write('VirtualTestSheriff: $Revision: 108794 $ \n'); def eprint(self, sText): """ Prints error messages. Returns 1 (for exit code usage.) """ print 'error: %s' % (sText,); if self.oLogFile is not None: self.oLogFile.write((u'error: %s\n' % (sText,)).encode('utf-8')); return 1; def dprint(self, sText): """ Prints debug info. 
""" if self.oConfig.fDebug: if not self.oConfig.fQuiet: print 'debug: %s' % (sText, ); if self.oLogFile is not None: self.oLogFile.write((u'debug: %s\n' % (sText,)).encode('utf-8')); return 0; def vprint(self, sText): """ Prints verbose info. """ if not self.oConfig.fQuiet: print 'info: %s' % (sText,); if self.oLogFile is not None: self.oLogFile.write((u'info: %s\n' % (sText,)).encode('utf-8')); return 0; def selfCheck(self): """ Does some self checks, looking up things we expect to be in the database and such. """ rcExit = 0; for sAttr in dir(self.__class__): if sAttr.startswith('ktReason_'): tReason = getattr(self.__class__, sAttr); oFailureReason = self.oFailureReasonLogic.cachedLookupByNameAndCategory(tReason[1], tReason[0]); if oFailureReason is None: rcExit = self.eprint(u'Failed to find failure reason "%s" in category "%s" in the database!' % (tReason[1], tReason[0],)); # Check the user account as well. if self.oLogin is None: oLogin = UserAccountLogic(self.oDb).tryFetchAccountByLoginName(VirtualTestSheriff.ksLoginName); if oLogin is None: rcExit = self.eprint(u'Cannot find my user account "%s"!' % (VirtualTestSheriff.ksLoginName,)); return rcExit; def badTestBoxManagement(self): """ Looks for bad test boxes and first tries once to reboot them then disables them. """ rcExit = 0; # # We skip this entirely if we're running in the past and not in harmless debug mode. # if self.oConfig.cStartHoursAgo != 0 \ and (not self.oConfig.fDebug or self.oConfig.fRealRun): return rcExit; tsNow = self.tsNow if self.oConfig.fDebug else None; cHoursBack = self.oConfig.cHoursBack if self.oConfig.fDebug else 2; oTestBoxLogic = TestBoxLogic(self.oDb); # # Get list of bad test boxes for given period and check them out individually. # aidBadTestBoxes = self.oTestSetLogic.fetchBadTestBoxIds(cHoursBack = cHoursBack, tsNow = tsNow); for idTestBox in aidBadTestBoxes: # Skip if the testbox is already disabled or has a pending reboot command. 
try: oTestBox = TestBoxData().initFromDbWithId(self.oDb, idTestBox); except Exception as oXcpt: rcExit = self.eprint('Failed to get data for test box #%u in badTestBoxManagement: %s' % (idTestBox, oXcpt,)); continue; if not oTestBox.fEnabled: self.dprint(u'badTestBoxManagement: Skipping test box #%u (%s) as it has been disabled already.' % ( idTestBox, oTestBox.sName, )); continue; if oTestBox.enmPendingCmd != TestBoxData.ksTestBoxCmd_None: self.dprint(u'badTestBoxManagement: Skipping test box #%u (%s) as it has a command pending: %s' % ( idTestBox, oTestBox.sName, oTestBox.enmPendingCmd)); continue; # Get the most recent testsets for this box (descending on tsDone) and see how bad it is. aoSets = self.oTestSetLogic.fetchSetsForTestBox(idTestBox, cHoursBack = cHoursBack, tsNow = tsNow); cOkay = 0; cBad = 0; iFirstOkay = len(aoSets); for iSet, oSet in enumerate(aoSets): if oSet.enmStatus == TestSetData.ksTestStatus_BadTestBox: cBad += 1; else: ## @todo maybe check the elapsed time here, it could still be a bad run. cOkay += 1; if iFirstOkay > iSet: iFirstOkay = iSet; if iSet > 10: break; # We react if there are two or more bad-testbox statuses at the head of the # history and at least three in the last 10 results. 
if iFirstOkay >= 2 and cBad > 2: if oTestBoxLogic.hasTestBoxRecentlyBeenRebooted(idTestBox, cHoursBack = cHoursBack, tsNow = tsNow): self.vprint(u'Disabling testbox #%u (%s) - iFirstOkay=%u cBad=%u cOkay=%u' % ( idTestBox, oTestBox.sName, iFirstOkay, cBad, cOkay)); if self.oConfig.fRealRun is True: try: oTestBoxLogic.disableTestBox(idTestBox, self.uidSelf, fCommit = True, sComment = 'Automatically disabled (iFirstOkay=%u cBad=%u cOkay=%u)' % (iFirstOkay, cBad, cOkay),); except Exception as oXcpt: rcExit = self.eprint(u'Error disabling testbox #%u (%u): %s\n' % (idTestBox, oTestBox.sName, oXcpt,)); else: self.vprint(u'Rebooting testbox #%u (%s) - iFirstOkay=%u cBad=%u cOkay=%u' % ( idTestBox, oTestBox.sName, iFirstOkay, cBad, cOkay)); if self.oConfig.fRealRun is True: try: oTestBoxLogic.rebootTestBox(idTestBox, self.uidSelf, fCommit = True, sComment = 'Automatically rebooted (iFirstOkay=%u cBad=%u cOkay=%u)' % (iFirstOkay, cBad, cOkay),); except Exception as oXcpt: rcExit = self.eprint(u'Error rebooting testbox #%u (%u): %s\n' % (idTestBox, oTestBox.sName, oXcpt,)); else: self.dprint(u'badTestBoxManagement: #%u (%s) looks ok: iFirstOkay=%u cBad=%u cOkay=%u' % ( idTestBox, oTestBox.sName, iFirstOkay, cBad, cOkay)); return rcExit; ## @name Failure reasons we know. 
## @{ ktReason_BSOD_Recovery = ( 'BSOD', 'Recovery' ); ktReason_BSOD_Automatic_Repair = ( 'BSOD', 'Automatic Repair' ); ktReason_BSOD_C0000225 = ( 'BSOD', '0xC0000225 (boot)' ); ktReason_Guru_Generic = ( 'Guru Meditations', 'Generic Guru Meditation' ); ktReason_Guru_VERR_IEM_INSTR_NOT_IMPLEMENTED = ( 'Guru Meditations', 'VERR_IEM_INSTR_NOT_IMPLEMENTED' ); ktReason_Guru_VERR_IEM_ASPECT_NOT_IMPLEMENTED = ( 'Guru Meditations', 'VERR_IEM_ASPECT_NOT_IMPLEMENTED' ); ktReason_Guru_VERR_TRPM_DONT_PANIC = ( 'Guru Meditations', 'VERR_TRPM_DONT_PANIC' ); ktReason_Guru_VERR_PGM_PHYS_PAGE_RESERVED = ( 'Guru Meditations', 'VERR_PGM_PHYS_PAGE_RESERVED' ); ktReason_Guru_VERR_VMX_INVALID_GUEST_STATE = ( 'Guru Meditations', 'VERR_VMX_INVALID_GUEST_STATE' ); ktReason_Guru_VINF_EM_TRIPLE_FAULT = ( 'Guru Meditations', 'VINF_EM_TRIPLE_FAULT' ); ktReason_Host_HostMemoryLow = ( 'Host', 'HostMemoryLow' ); ktReason_Host_Reboot_OSX_Watchdog_Timeout = ( 'Host Reboot', 'OSX Watchdog Timeout' ); ktReason_Networking_Nonexistent_host_nic = ( 'Networking', 'Nonexistent host networking interface' ); ktReason_OSInstall_GRUB_hang = ( 'O/S Install', 'GRUB hang' ); ktReason_Panic_MP_BIOS_IO_APIC = ( 'Panic', 'MP-BIOS/IO-APIC' ); ktReason_XPCOM_Exit_Minus_11 = ( 'API / (XP)COM', 'exit -11' ); ktReason_XPCOM_VBoxSVC_Hang = ( 'API / (XP)COM', 'VBoxSVC hang' ); ktReason_XPCOM_VBoxSVC_Hang_Plus_Heap_Corruption = ( 'API / (XP)COM', 'VBoxSVC hang + heap corruption' ); ktReason_XPCOM_NS_ERROR_CALL_FAILED = ( 'API / (XP)COM', 'NS_ERROR_CALL_FAILED' ); ktReason_Unknown_Heap_Corruption = ( 'Unknown', 'Heap corruption' ); ktReason_Unknown_Reboot_Loop = ( 'Unknown', 'Reboot loop' ); ktReason_Ignore_Buggy_Test_Driver = ( 'Ignore', 'Buggy test driver' ); ktReason_Ignore_Stale_Files = ( 'Ignore', 'Stale files' ); ktReason_Buggy_Build_Broken_Build = ( 'Broken Build', 'Buggy build' ); ## @} ## BSOD category. ksBsodCategory = 'BSOD'; ## Special reason indicating that the flesh and blood sheriff has work to do. 
ksBsodAddNew = 'Add new BSOD'; ## Unit test category. ksUnitTestCategory = 'Unit'; ## Special reason indicating that the flesh and blood sheriff has work to do. ksUnitTestAddNew = 'Add new'; ## Used for indica that we shouldn't report anything for this test result ID and ## consider promoting the previous error to test set level if it's the only one. ktHarmless = ( 'Probably', 'Caused by previous error' ); def caseClosed(self, oCaseFile): """ Reports the findings in the case and closes it. """ # # Log it and create a dReasonForReasultId we can use below. # dCommentForResultId = oCaseFile.dCommentForResultId; if len(oCaseFile.dReasonForResultId) > 0: # Must weed out ktHarmless. dReasonForResultId = {}; for idKey, tReason in oCaseFile.dReasonForResultId.items(): if tReason is not self.ktHarmless: dReasonForResultId[idKey] = tReason; if len(dReasonForResultId) == 0: self.vprint(u'TODO: Closing %s without a real reason, only %s.' % (oCaseFile.sName, oCaseFile.dReasonForResultId)); return False; # Try promote to single reason. atValues = dReasonForResultId.values(); fSingleReason = True; if len(dReasonForResultId) == 1 and dReasonForResultId.keys()[0] != oCaseFile.oTestSet.idTestResult: self.dprint(u'Promoting single reason to whole set: %s' % (atValues[0],)); elif len(dReasonForResultId) > 1 and len(atValues) == atValues.count(atValues[0]): self.dprint(u'Merged %d reasons to a single one: %s' % (len(atValues), atValues[0])); else: fSingleReason = False; if fSingleReason: dReasonForResultId = { oCaseFile.oTestSet.idTestResult: atValues[0], }; if len(dCommentForResultId) > 0: dCommentForResultId = { oCaseFile.oTestSet.idTestResult: dCommentForResultId.values()[0], }; elif oCaseFile.tReason is not None: dReasonForResultId = { oCaseFile.oTestSet.idTestResult: oCaseFile.tReason, }; else: self.vprint(u'Closing %s without a reason - this should not happen!' 
% (oCaseFile.sName,)); return False; self.vprint(u'Closing %s with following reason%s: %s' % ( oCaseFile.sName, 's' if dReasonForResultId > 0 else '', dReasonForResultId, )); # # Add the test failure reason record(s). # for idTestResult, tReason in dReasonForResultId.items(): oFailureReason = self.oFailureReasonLogic.cachedLookupByNameAndCategory(tReason[1], tReason[0]); if oFailureReason is not None: sComment = 'Set by $Revision: 108794 $' # Handy for reverting later. if idTestResult in dCommentForResultId: sComment += ': ' + dCommentForResultId[idTestResult]; oAdd = TestResultFailureData(); oAdd.initFromValues(idTestResult = idTestResult, idFailureReason = oFailureReason.idFailureReason, uidAuthor = self.uidSelf, idTestSet = oCaseFile.oTestSet.idTestSet, sComment = sComment,); if self.oConfig.fRealRun: try: self.oTestResultFailureLogic.addEntry(oAdd, self.uidSelf, fCommit = True); except Exception as oXcpt: self.eprint(u'caseClosed: Exception "%s" while adding reason %s for %s' % (oXcpt, oAdd, oCaseFile.sLongName,)); else: self.eprint(u'caseClosed: Cannot locate failure reason: %s / %s' % ( tReason[0], tReason[1],)); return True; # # Tools for assiting log parsing. # @staticmethod def matchFollowedByLines(sStr, off, asFollowingLines): """ Worker for isThisFollowedByTheseLines. """ # Advance off to the end of the line. off = sStr.find('\n', off); if off < 0: return False; off += 1; # Match each string with the subsequent lines. for iLine, sLine in enumerate(asFollowingLines): offEnd = sStr.find('\n', off); if offEnd < 0: return iLine + 1 == len(asFollowingLines) and sStr.find(sLine, off) < 0; if len(sLine) > 0 and sStr.find(sLine, off, offEnd) < 0: return False; # next line. off = offEnd + 1; return True; @staticmethod def isThisFollowedByTheseLines(sStr, sFirst, asFollowingLines): """ Looks for a line contining sFirst which is then followed by lines with the strings in asFollowingLines. (No newline chars anywhere!) Returns True / False. 
""" off = sStr.find(sFirst, 0); while off >= 0: if VirtualTestSheriff.matchFollowedByLines(sStr, off, asFollowingLines): return True; off = sStr.find(sFirst, off + 1); return False; @staticmethod def findAndReturnResetOfLine(sHaystack, sNeedle): """ Looks for sNeedle in sHaystack. Returns The text following the needle up to the end of the line. Returns None if not found. """ off = sHaystack.find(sNeedle); if off < 0: return None; off += len(sNeedle) offEol = sHaystack.find('\n', off); if offEol < 0: offEol = len(sHaystack); return sHaystack[off:offEol] @staticmethod def findInAnyAndReturnResetOfLine(asHaystacks, sNeedle): """ Looks for sNeedle in zeroe or more haystacks (asHaystack). Returns The text following the first needed found up to the end of the line. Returns None if not found. """ for sHaystack in asHaystacks: sRet = VirtualTestSheriff.findAndReturnResetOfLine(sHaystack, sNeedle); if sRet is not None: return sRet; return None; # # The investigative units. # def investigateBadTestBox(self, oCaseFile): """ Checks out bad-testbox statuses. """ _ = oCaseFile; return False; def investigateVBoxUnitTest(self, oCaseFile): """ Checks out a VBox unittest problem. """ # # Process simple test case failures first, using their name as reason. # We do the reason management just like for BSODs. # cRelevantOnes = 0; aoFailedResults = oCaseFile.oTree.getListOfFailures(); for oFailedResult in aoFailedResults: if oFailedResult is oCaseFile.oTree: self.vprint('TODO: toplevel failure'); cRelevantOnes += 1 elif oFailedResult.sName == 'Installing VirtualBox': self.vprint('TODO: Installation failure'); cRelevantOnes += 1 elif oFailedResult.sName == 'Uninstalling VirtualBox': self.vprint('TODO: Uninstallation failure'); cRelevantOnes += 1 elif oFailedResult.oParent is not None: # Get the 2nd level node because that's where we'll find the unit test name. while oFailedResult.oParent.oParent is not None: oFailedResult = oFailedResult.oParent; # Only report a failure once. 
if oFailedResult.idTestResult not in oCaseFile.dReasonForResultId: sKey = oFailedResult.sName; if sKey.startswith('testcase/'): sKey = sKey[9:]; if sKey in self.asUnitTestReasons: tReason = ( self.ksUnitTestCategory, sKey ); oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult); else: self.dprint(u'Unit test failure "%s" not found in %s;' % (sKey, self.asUnitTestReasons)); tReason = ( self.ksUnitTestCategory, self.ksUnitTestAddNew ); oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult, sComment = sKey); cRelevantOnes += 1 else: self.vprint(u'Internal error: expected oParent to NOT be None for %s' % (oFailedResult,)); # # If we've caught all the relevant ones by now, report the result. # if len(oCaseFile.dReasonForResultId) >= cRelevantOnes: return self.caseClosed(oCaseFile); return False; ## Things we search a main or VM log for to figure out why something went bust. katSimpleMainAndVmLogReasons = [ # ( Whether to stop on hit, reason tuple, needle text. ) ( False, ktReason_Guru_Generic, 'GuruMeditation' ), ( False, ktReason_Guru_Generic, 'Guru Meditation' ), ( True, ktReason_Guru_VERR_IEM_INSTR_NOT_IMPLEMENTED, 'VERR_IEM_INSTR_NOT_IMPLEMENTED' ), ( True, ktReason_Guru_VERR_IEM_ASPECT_NOT_IMPLEMENTED, 'VERR_IEM_ASPECT_NOT_IMPLEMENTED' ), ( True, ktReason_Guru_VERR_TRPM_DONT_PANIC, 'VERR_TRPM_DONT_PANIC' ), ( True, ktReason_Guru_VERR_PGM_PHYS_PAGE_RESERVED, 'VERR_PGM_PHYS_PAGE_RESERVED' ), ( True, ktReason_Guru_VERR_VMX_INVALID_GUEST_STATE, 'VERR_VMX_INVALID_GUEST_STATE' ), ( True, ktReason_Guru_VINF_EM_TRIPLE_FAULT, 'VINF_EM_TRIPLE_FAULT' ), ( True, ktReason_Networking_Nonexistent_host_nic, 'rc=E_FAIL text="Nonexistent host networking interface, name \'eth0\' (VERR_INTERNAL_ERROR)"' ), ( True, ktReason_Host_Reboot_OSX_Watchdog_Timeout, ': "OSX Watchdog Timeout: ' ), ( False, ktReason_XPCOM_NS_ERROR_CALL_FAILED, 'Exception: 0x800706be (Call to remote object failed (NS_ERROR_CALL_FAILED))' ), ( True, ktReason_Host_HostMemoryLow, 'HostMemoryLow' ), ( 
True, ktReason_Host_HostMemoryLow, 'Failed to procure handy pages; rc=VERR_NO_MEMORY' ), ]; ## Things we search the _RIGHT_ _STRIPPED_ vgatext for. katSimpleVgaTextReasons = [ # ( Whether to stop on hit, reason tuple, needle text. ) ( True, ktReason_Panic_MP_BIOS_IO_APIC, "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n\n" ), ( True, ktReason_Panic_MP_BIOS_IO_APIC, "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n" "...trying to set up timer (IRQ0) through the 8259A ... failed.\n" "...trying to set up timer as Virtual Wire IRQ... failed.\n" "...trying to set up timer as ExtINT IRQ... failed :(.\n" "Kernel panic - not syncing: IO-APIC + timer doesn't work! Boot with apic=debug\n" "and send a report. Then try booting with the 'noapic' option\n" "\n" ), ( True, ktReason_OSInstall_GRUB_hang, "-----\nGRUB Loading stage2..\n\n\n\n" ), ]; ## Mapping screenshot/failure SHA-256 hashes to failure reasons. katSimpleScreenshotHashReasons = [ # ( Whether to stop on hit, reason tuple, lowercased sha-256 of PIL.Image.tostring output ) ( True, ktReason_BSOD_Recovery, '576f8e38d62b311cac7e3dc3436a0d0b9bd8cfd7fa9c43aafa95631520a45eac' ), ( True, ktReason_BSOD_Automatic_Repair, 'c6a72076cc619937a7a39cfe9915b36d94cee0d4e3ce5ce061485792dcee2749' ), ( True, ktReason_BSOD_Automatic_Repair, '26c4d8a724ff2c5e1051f3d5b650dbda7b5fdee0aa3e3c6059797f7484a515df' ), ( True, ktReason_BSOD_C0000225, 'bd13a144be9dcdfb16bc863ff4c8f02a86e263c174f2cd5ffd27ca5f3aa31789' ), ( True, ktReason_BSOD_C0000225, '8348b465e7ee9e59dd4e785880c57fd8677de05d11ac21e786bfde935307b42f' ), ( True, ktReason_BSOD_C0000225, '1316e1fc818a73348412788e6910b8c016f237d8b4e15b20caf4a866f7a7840e' ), ( True, ktReason_BSOD_C0000225, '54e0acbff365ce20a85abbe42bcd53647b8b9e80c68e45b2cd30e86bf177a0b5' ), ( True, ktReason_BSOD_C0000225, '50fec50b5199923fa48b3f3e782687cc381e1c8a788ebda14e6a355fbe3bb1b3' ), ]; def investigateVMResult(self, oCaseFile, oFailedResult, sResultLog): """ Investigates a failed VM run. 
""" def investigateLogSet(): """ Investigates the current set of VM related logs. """ self.dprint('investigateLogSet: lengths: result log %u, VM log %u, kernel log %u, vga text %u, info text %u' % ( len(sResultLog) if sResultLog is not None else 0, len(sVMLog) if sVMLog is not None else 0, len(sKrnlLog) if sKrnlLog is not None else 0, len(sVgaText) if sVgaText is not None else 0, len(sInfoText) if sInfoText is not None else 0, )); #self.dprint(u'main.log<<<\n%s\n<<<\n' % (sResultLog,)); #self.dprint(u'vbox.log<<<\n%s\n<<<\n' % (sVMLog,)); #self.dprint(u'krnl.log<<<\n%s\n<<<\n' % (sKrnlLog,)); #self.dprint(u'vgatext.txt<<<\n%s\n<<<\n' % (sVgaText,)); #self.dprint(u'info.txt<<<\n%s\n<<<\n' % (sInfoText,)); # TODO: more # # Look for BSODs. Some stupid stupid inconsistencies in reason and log messages here, so don't try prettify this. # sDetails = self.findInAnyAndReturnResetOfLine([ sVMLog, sResultLog ], 'GIM: HyperV: Guest indicates a fatal condition! P0='); if sDetails is not None: # P0=%#RX64 P1=%#RX64 P2=%#RX64 P3=%#RX64 P4=%#RX64 " sKey = sDetails.split(' ', 1)[0]; try: sKey = '0x%08X' % (int(sKey, 16),); except: pass; if sKey in self.asBsodReasons: tReason = ( self.ksBsodCategory, sKey ); elif sKey.lower() in self.asBsodReasons: # just in case. tReason = ( self.ksBsodCategory, sKey.lower() ); else: self.dprint(u'BSOD "%s" not found in %s;' % (sKey, self.asBsodReasons)); tReason = ( self.ksBsodCategory, self.ksBsodAddNew ); return oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult, sComment = sDetails.strip()); # # Look for linux panic. # if sKrnlLog is not None: pass; ## @todo # # Loop thru the simple stuff. # fFoundSomething = False; for fStopOnHit, tReason, sNeedle in self.katSimpleMainAndVmLogReasons: if sResultLog.find(sNeedle) > 0 or sVMLog.find(sNeedle) > 0: oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult); if fStopOnHit: return True; fFoundSomething = True; # Continue with vga text. 
if sVgaText is not None and len(sVgaText) > 0: for fStopOnHit, tReason, sNeedle in self.katSimpleVgaTextReasons: if sVgaText.find(sNeedle) > 0: oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult); if fStopOnHit: return True; fFoundSomething = True; _ = sInfoText; # Continue with screen hashes. if sScreenHash is not None: for fStopOnHit, tReason, sHash in self.katSimpleScreenshotHashReasons: if sScreenHash == sHash: oCaseFile.noteReasonForId(tReason, oFailedResult.idTestResult); if fStopOnHit: return True; fFoundSomething = True; # # Check for repeated reboots... # cResets = sVMLog.count('Changing the VM state from \'RUNNING\' to \'RESETTING\''); if cResets > 10: return oCaseFile.noteReasonForId(self.ktReason_Unknown_Reboot_Loop, oFailedResult.idTestResult, sComment = 'Counted %s reboots' % (cResets,)); return fFoundSomething; # # Check if we got any VM or/and kernel logs. Treat them as sets in # case we run multiple VMs here (this is of course ASSUMING they # appear in the order that terminateVmBySession uploads them). 
# sVMLog = None; sScreenHash = None; sKrnlLog = None; sVgaText = None; sInfoText = None; for oFile in oFailedResult.aoFiles: if oFile.sKind == TestResultFileData.ksKind_LogReleaseVm: if sVMLog is not None: if investigateLogSet() is True: return True; sKrnlLog = None; sScreenHash = None; sVgaText = None; sInfoText = None; sVMLog = oCaseFile.getLogFile(oFile); elif oFile.sKind == TestResultFileData.ksKind_LogGuestKernel: sKrnlLog = oCaseFile.getLogFile(oFile); elif oFile.sKind == TestResultFileData.ksKind_InfoVgaText: sVgaText = '\n'.join([sLine.rstrip() for sLine in oCaseFile.getLogFile(oFile).split('\n')]); elif oFile.sKind == TestResultFileData.ksKind_InfoCollection: sInfoText = oCaseFile.getLogFile(oFile); elif oFile.sKind == TestResultFileData.ksKind_ScreenshotFailure: sScreenHash = oCaseFile.getScreenshotSha256(oFile); if sScreenHash is not None: sScreenHash = sScreenHash.lower(); self.vprint(u'%s %s' % ( sScreenHash, oFile.sFile,)); if sVMLog is not None and investigateLogSet() is True: return True; return None; def isResultFromVMRun(self, oFailedResult, sResultLog): """ Checks if this result and corresponding log snippet looks like a VM run. """ # Look for startVmEx/ startVmAndConnectToTxsViaTcp and similar output in the log. if sResultLog.find(' startVm') > 0: return True; # Any other indicators? No? _ = oFailedResult; return False; def investigateVBoxVMTest(self, oCaseFile, fSingleVM): """ Checks out a VBox VM test. This is generic investigation of a test running one or more VMs, like for example a smoke test or a guest installation test. The fSingleVM parameter is a hint, which probably won't come in useful. """ _ = fSingleVM; # # Get a list of test result failures we should be looking into and the main log. # aoFailedResults = oCaseFile.oTree.getListOfFailures(); sMainLog = oCaseFile.getMainLog(); # # There are a set of errors ending up on the top level result record. # Should deal with these first. 
# if len(aoFailedResults) == 1 and aoFailedResults[0] == oCaseFile.oTree: # Check if we've just got that XPCOM client smoke test shutdown issue. This will currently always # be reported on the top result because vboxinstall.py doesn't add an error for it. It is easy to # ignore other failures in the test if we're not a little bit careful here. if sMainLog.find('vboxinstaller: Exit code: -11 (') > 0: oCaseFile.noteReason(self.ktReason_XPCOM_Exit_Minus_11); return self.caseClosed(oCaseFile); # Hang after starting VBoxSVC (e.g. idTestSet=136307258) if self.isThisFollowedByTheseLines(sMainLog, 'oVBoxMgr=<vboxapi.VirtualBoxManager object at', (' Timeout: ', ' Attempting to abort child...',) ): if sMainLog.find('*** glibc detected *** /') > 0: oCaseFile.noteReason(self.ktReason_XPCOM_VBoxSVC_Hang_Plus_Heap_Corruption); else: oCaseFile.noteReason(self.ktReason_XPCOM_VBoxSVC_Hang); return self.caseClosed(oCaseFile); # Look for heap corruption without visible hang. if sMainLog.find('*** glibc detected *** /') > 0 \ or sMainLog.find("-1073740940") > 0: # STATUS_HEAP_CORRUPTION / 0xc0000374 oCaseFile.noteReason(self.ktReason_Unknown_Heap_Corruption); return self.caseClosed(oCaseFile); # Out of memory w/ timeout. if sMainLog.find('sErrId=HostMemoryLow') > 0: oCaseFile.noteReason(self.ktReason_Host_HostMemoryLow); return self.caseClosed(oCaseFile); # Stale files like vts_rm.exe (windows). offEnd = sMainLog.rfind('*** The test driver exits successfully. ***'); if offEnd > 0 and sMainLog.find('[Error 145] The directory is not empty: ', offEnd) > 0: oCaseFile.noteReason(self.ktReason_Ignore_Stale_Files); return self.caseClosed(oCaseFile); # # XPCOM screwup # if sMainLog.find('AttributeError: \'NoneType\' object has no attribute \'addObserver\'') > 0: oCaseFile.noteReason(self.ktReason_Buggy_Build_Broken_Build); return self.caseClosed(oCaseFile); # # Go thru each failed result. 
# for oFailedResult in aoFailedResults: self.dprint(u'Looking at test result #%u - %s' % (oFailedResult.idTestResult, oFailedResult.getFullName(),)); sResultLog = TestSetData.extractLogSectionElapsed(sMainLog, oFailedResult.tsCreated, oFailedResult.tsElapsed); if oFailedResult.sName == 'Installing VirtualBox': self.vprint('TODO: Installation failure'); elif oFailedResult.sName == 'Uninstalling VirtualBox': self.vprint('TODO: Uninstallation failure'); elif self.isResultFromVMRun(oFailedResult, sResultLog): self.investigateVMResult(oCaseFile, oFailedResult, sResultLog); elif sResultLog.find('Exception: 0x800706be (Call to remote object failed (NS_ERROR_CALL_FAILED))') > 0: oCaseFile.noteReasonForId(self.ktReason_XPCOM_NS_ERROR_CALL_FAILED, oFailedResult.idTestResult); elif sResultLog.find('The machine is not mutable (state is ') > 0: self.vprint('Ignoring "machine not mutable" error as it is probably due to an earlier problem'); oCaseFile.noteReasonForId(self.ktHarmless, oFailedResult.idTestResult); elif sResultLog.find('** error: no action was specified') > 0 \ or sResultLog.find('(len(self._asXml, asText))') > 0: oCaseFile.noteReasonForId(self.ktReason_Ignore_Buggy_Test_Driver, oFailedResult.idTestResult); else: self.vprint(u'TODO: Cannot place idTestResult=%u - %s' % (oFailedResult.idTestResult, oFailedResult.sName,)); self.dprint(u'%s + %s <<\n%s\n<<' % (oFailedResult.tsCreated, oFailedResult.tsElapsed, sResultLog,)); # # Report home and close the case if we got them all, otherwise log it. # if len(oCaseFile.dReasonForResultId) >= len(aoFailedResults): return self.caseClosed(oCaseFile); if len(oCaseFile.dReasonForResultId) > 0: self.vprint(u'TODO: Got %u out of %u - close, but no cigar. :-/' % (len(oCaseFile.dReasonForResultId), len(aoFailedResults))); else: self.vprint(u'XXX: Could not figure out anything at all! :-('); return False; def reasoningFailures(self): """ Guess the reason for failures. 
""" # # Get a list of failed test sets without any assigned failure reason. # cGot = 0; aoTestSets = self.oTestSetLogic.fetchFailedSetsWithoutReason(cHoursBack = self.oConfig.cHoursBack, tsNow = self.tsNow); for oTestSet in aoTestSets: self.dprint(u''); self.dprint(u'reasoningFailures: Checking out test set #%u, status %s' % ( oTestSet.idTestSet, oTestSet.enmStatus,)) # # Open a case file and assign it to the right investigator. # (oTree, _ ) = self.oTestResultLogic.fetchResultTree(oTestSet.idTestSet); oBuild = BuildDataEx().initFromDbWithId( self.oDb, oTestSet.idBuild, oTestSet.tsCreated); oTestBox = TestBoxData().initFromDbWithGenId( self.oDb, oTestSet.idGenTestBox); oTestGroup = TestGroupData().initFromDbWithId( self.oDb, oTestSet.idTestGroup, oTestSet.tsCreated); oTestCase = TestCaseDataEx().initFromDbWithGenId( self.oDb, oTestSet.idGenTestCase, oTestSet.tsConfig); oCaseFile = VirtualTestSheriffCaseFile(self, oTestSet, oTree, oBuild, oTestBox, oTestGroup, oTestCase); if oTestSet.enmStatus == TestSetData.ksTestStatus_BadTestBox: self.dprint(u'investigateBadTestBox is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateBadTestBox(oCaseFile); elif oCaseFile.isVBoxUnitTest(): self.dprint(u'investigateVBoxUnitTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxUnitTest(oCaseFile); elif oCaseFile.isVBoxInstallTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = True); elif oCaseFile.isVBoxUSBTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = True); elif oCaseFile.isVBoxStorageTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = True); elif oCaseFile.isVBoxGAsTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' 
% (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = True); elif oCaseFile.isVBoxAPITest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = True); elif oCaseFile.isVBoxBenchmarkTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = False); elif oCaseFile.isVBoxSmokeTest(): self.dprint(u'investigateVBoxVMTest is taking over %s.' % (oCaseFile.sLongName,)); fRc = self.investigateVBoxVMTest(oCaseFile, fSingleVM = False); else: self.vprint(u'reasoningFailures: Unable to classify test set: %s' % (oCaseFile.sLongName,)); fRc = False; cGot += fRc is True; self.vprint(u'reasoningFailures: Got %u out of %u' % (cGot, len(aoTestSets), )); return 0; def main(self): """ The 'main' function. Return exit code (0, 1, etc). """ # Database stuff. self.oDb = TMDatabaseConnection() self.oTestResultLogic = TestResultLogic(self.oDb); self.oTestSetLogic = TestSetLogic(self.oDb); self.oFailureReasonLogic = FailureReasonLogic(self.oDb); self.oTestResultFailureLogic = TestResultFailureLogic(self.oDb); self.asBsodReasons = self.oFailureReasonLogic.fetchForSheriffByNamedCategory(self.ksBsodCategory); self.asUnitTestReasons = self.oFailureReasonLogic.fetchForSheriffByNamedCategory(self.ksUnitTestCategory); # Get a fix on our 'now' before we do anything.. self.oDb.execute('SELECT CURRENT_TIMESTAMP - interval \'%s hours\'', (self.oConfig.cStartHoursAgo,)); self.tsNow = self.oDb.fetchOne(); # If we're suppost to commit anything we need to get our user ID. rcExit = 0; if self.oConfig.fRealRun: self.oLogin = UserAccountLogic(self.oDb).tryFetchAccountByLoginName(VirtualTestSheriff.ksLoginName); if self.oLogin is None: rcExit = self.eprint('Cannot find my user account "%s"!' % (VirtualTestSheriff.ksLoginName,)); else: self.uidSelf = self.oLogin.uid; # Do the stuff. 
if rcExit == 0: rcExit = self.selfCheck(); if rcExit == 0: rcExit = self.badTestBoxManagement(); rcExit2 = self.reasoningFailures(); if rcExit == 0: rcExit = rcExit2; # Cleanup. self.oFailureReasonLogic = None; self.oTestResultFailureLogic = None; self.oTestSetLogic = None; self.oTestResultLogic = None; self.oDb.close(); self.oDb = None; if self.oLogFile is not None: self.oLogFile.close(); self.oLogFile = None; return rcExit; if __name__ == '__main__': sys.exit(VirtualTestSheriff().main());
emonty/ansible
refs/heads/devel
lib/ansible/inventory/data.py
43
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys from ansible import constants as C from ansible.errors import AnsibleError from ansible.inventory.group import Group from ansible.inventory.host import Host from ansible.module_utils.six import iteritems, string_types from ansible.utils.display import Display from ansible.utils.vars import combine_vars from ansible.utils.path import basedir display = Display() class InventoryData(object): """ Holds inventory data (host and group objects). Using it's methods should guarantee expected relationships and data. 
""" def __init__(self): # the inventory object holds a list of groups self.groups = {} self.hosts = {} # provides 'groups' magic var, host object has group_names self._groups_dict_cache = {} # current localhost, implicit or explicit self.localhost = None self.current_source = None # Always create the 'all' and 'ungrouped' groups, for group in ('all', 'ungrouped'): self.add_group(group) self.add_child('all', 'ungrouped') def serialize(self): self._groups_dict_cache = None data = { 'groups': self.groups, 'hosts': self.hosts, 'local': self.localhost, 'source': self.current_source, } return data def deserialize(self, data): self._groups_dict_cache = {} self.hosts = data.get('hosts') self.groups = data.get('groups') self.localhost = data.get('local') self.current_source = data.get('source') def _create_implicit_localhost(self, pattern): if self.localhost: new_host = self.localhost else: new_host = Host(pattern) new_host.address = "127.0.0.1" new_host.implicit = True # set localhost defaults py_interp = sys.executable if not py_interp: # sys.executable is not set in some cornercases. see issue #13585 py_interp = '/usr/bin/python' display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. 
' 'You can correct this by setting ansible_python_interpreter for localhost') new_host.set_variable("ansible_python_interpreter", py_interp) new_host.set_variable("ansible_connection", 'local') self.localhost = new_host return new_host def reconcile_inventory(self): ''' Ensure inventory basic rules, run after updates ''' display.debug('Reconcile groups and hosts in inventory.') self.current_source = None group_names = set() # set group vars from group_vars/ files and vars plugins for g in self.groups: group = self.groups[g] group_names.add(group.name) # ensure all groups inherit from 'all' if group.name != 'all' and not group.get_ancestors(): self.add_child('all', group.name) host_names = set() # get host vars from host_vars/ files and vars plugins for host in self.hosts.values(): host_names.add(host.name) mygroups = host.get_groups() if self.groups['ungrouped'] in mygroups: # clear ungrouped of any incorrectly stored by parser if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])): self.groups['ungrouped'].remove_host(host) elif not host.implicit: # add ungrouped hosts to ungrouped, except implicit length = len(mygroups) if length == 0 or (length == 1 and self.groups['all'] in mygroups): self.add_child('ungrouped', host.name) # special case for implicit hosts if host.implicit: host.vars = combine_vars(self.groups['all'].get_vars(), host.vars) # warn if overloading identifier as both group and host for conflict in group_names.intersection(host_names): display.warning("Found both group and host with same name: %s" % conflict) self._groups_dict_cache = {} def get_host(self, hostname): ''' fetch host object using name deal with implicit localhost ''' matching_host = self.hosts.get(hostname, None) # if host is not in hosts dict if matching_host is None and hostname in C.LOCALHOST: # might need to create implicit localhost matching_host = self._create_implicit_localhost(hostname) return matching_host def add_group(self, group): ''' adds a group 
to inventory if not there already, returns named actually used ''' if group: if not isinstance(group, string_types): raise AnsibleError("Invalid group name supplied, expected a string but got %s for %s" % (type(group), group)) if group not in self.groups: g = Group(group) if g.name not in self.groups: self.groups[g.name] = g self._groups_dict_cache = {} display.debug("Added group %s to inventory" % group) group = g.name else: display.debug("group %s already in inventory" % group) else: raise AnsibleError("Invalid empty/false group name provided: %s" % group) return group def remove_group(self, group): if group in self.groups: del self.groups[group] display.debug("Removed group %s from inventory" % group) self._groups_dict_cache = {} for host in self.hosts: h = self.hosts[host] h.remove_group(group) def add_host(self, host, group=None, port=None): ''' adds a host to inventory and possibly a group if not there already ''' if host: if not isinstance(host, string_types): raise AnsibleError("Invalid host name supplied, expected a string but got %s for %s" % (type(host), host)) # TODO: add to_safe_host_name g = None if group: if group in self.groups: g = self.groups[group] else: raise AnsibleError("Could not find group %s in inventory" % group) if host not in self.hosts: h = Host(host, port) self.hosts[host] = h if self.current_source: # set to 'first source' in which host was encountered self.set_variable(host, 'inventory_file', self.current_source) self.set_variable(host, 'inventory_dir', basedir(self.current_source)) else: self.set_variable(host, 'inventory_file', None) self.set_variable(host, 'inventory_dir', None) display.debug("Added host %s to inventory" % (host)) # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'. 
if host in C.LOCALHOST: if self.localhost is None: self.localhost = self.hosts[host] display.vvvv("Set default localhost to %s" % h) else: display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name)) else: h = self.hosts[host] if g: g.add_host(h) self._groups_dict_cache = {} display.debug("Added host %s to group %s" % (host, group)) else: raise AnsibleError("Invalid empty host name provided: %s" % host) return host def remove_host(self, host): if host.name in self.hosts: del self.hosts[host.name] for group in self.groups: g = self.groups[group] g.remove_host(host) def set_variable(self, entity, varname, value): ''' sets a variable for an inventory object ''' if entity in self.groups: inv_object = self.groups[entity] elif entity in self.hosts: inv_object = self.hosts[entity] else: raise AnsibleError("Could not identify group or host named %s" % entity) inv_object.set_variable(varname, value) display.debug('set %s for %s' % (varname, entity)) def add_child(self, group, child): ''' Add host or group to group ''' if group in self.groups: g = self.groups[group] if child in self.groups: g.add_child_group(self.groups[child]) elif child in self.hosts: g.add_host(self.hosts[child]) else: raise AnsibleError("%s is not a known host nor group" % child) self._groups_dict_cache = {} display.debug('Group %s now contains %s' % (group, child)) else: raise AnsibleError("%s is not a known group" % group) def get_groups_dict(self): """ We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed. """ if not self._groups_dict_cache: for (group_name, group) in iteritems(self.groups): self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()] return self._groups_dict_cache
Fuzion24/androguard
refs/heads/master
androguard/core/bytecode.py
14
# This file is part of Androguard. # # Copyright (C) 2012/2013, Anthony Desnos <desnos at t0t0.fr> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib from xml.sax.saxutils import escape from struct import unpack, pack import textwrap import json from androconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range def disable_print_colors(): colors = save_colors() remove_colors() return colors def enable_print_colors(colors): enable_colors(colors) # Handle exit message def Exit(msg): warning("Error : " + msg) raise ("oops") def Warning(msg): warning(msg) def _PrintBanner(): print_fct = CONF["PRINT_FCT"] print_fct("*" * 75 + "\n") def _PrintSubBanner(title=None): print_fct = CONF["PRINT_FCT"] if title == None: print_fct("#" * 20 + "\n") else: print_fct("#" * 10 + " " + title + "\n") def _PrintNote(note, tab=0): print_fct = CONF["PRINT_FCT"] note_color = CONF["COLORS"]["NOTE"] normal_color = CONF["COLORS"]["NORMAL"] print_fct("\t" * tab + "%s# %s%s" % (note_color, note, normal_color) + "\n") # Print arg into a correct format def _Print(name, arg): buff = name + " " if type(arg).__name__ == 'int': buff += "0x%x" % arg elif type(arg).__name__ == 'long': buff += "0x%x" % arg elif type(arg).__name__ == 'str': buff += "%s" % arg elif isinstance(arg, SV): buff += "0x%x" % arg.get_value() elif isinstance(arg, SVs): buff += arg.get_value().__str__() print buff def PrettyShowEx(exceptions): if len(exceptions) > 0: 
CONF["PRINT_FCT"]("Exceptions:\n") for i in exceptions: CONF["PRINT_FCT"]("\t%s%s%s\n" % (CONF["COLORS"]["EXCEPTION"], i.show_buff(), CONF["COLORS"]["NORMAL"])) def _PrintXRef(tag, items): print_fct = CONF["PRINT_FCT"] for i in items: print_fct("%s: %s %s %s %s\n" % (tag, i[0].get_class_name(), i[0].get_name(), i[0].get_descriptor(), ' '.join("%x" % j.get_idx() for j in i[1]))) def _PrintDRef(tag, items): print_fct = CONF["PRINT_FCT"] for i in items: print_fct("%s: %s %s %s %s\n" % (tag, i[0].get_class_name(), i[0].get_name(), i[0].get_descriptor(), ' '.join("%x" % j for j in i[1]))) def _PrintDefault(msg): print_fct = CONF["PRINT_FCT"] print_fct(msg) def PrettyShow(m_a, basic_blocks, notes={}): idx = 0 nb = 0 offset_color = CONF["COLORS"]["OFFSET"] offset_addr_color = CONF["COLORS"]["OFFSET_ADDR"] instruction_name_color = CONF["COLORS"]["INSTRUCTION_NAME"] branch_false_color = CONF["COLORS"]["BRANCH_FALSE"] branch_true_color = CONF["COLORS"]["BRANCH_TRUE"] branch_color = CONF["COLORS"]["BRANCH"] exception_color = CONF["COLORS"]["EXCEPTION"] bb_color = CONF["COLORS"]["BB"] normal_color = CONF["COLORS"]["NORMAL"] print_fct = CONF["PRINT_FCT"] colors = CONF["COLORS"]["OUTPUT"] for i in basic_blocks: print_fct("%s%s%s : \n" % (bb_color, i.get_name(), normal_color)) instructions = i.get_instructions() for ins in instructions: if nb in notes: for note in notes[nb]: _PrintNote(note, 1) print_fct("\t%s%-3d%s(%s%08x%s) " % (offset_color, nb, normal_color, offset_addr_color, idx, normal_color)) print_fct("%s%-20s%s" % (instruction_name_color, ins.get_name(), normal_color)) operands = ins.get_operands() print_fct( "%s" % ", ".join(m_a.get_vm().colorize_operands(operands, colors))) op_value = ins.get_op_value() if ins == instructions[-1] and i.childs: print_fct(" ") # packed/sparse-switch if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1: values = i.get_special_ins(idx).get_values() print_fct("%s[ D:%s%s " % (branch_false_color, i.childs[0][2].get_name(), 
branch_color)) print_fct(' '.join("%d:%s" % ( values[j], i.childs[j + 1][2].get_name()) for j in range(0, len(i.childs) - 1)) + " ]%s" % normal_color) else: if len(i.childs) == 2: print_fct("%s[ %s%s " % (branch_false_color, i.childs[0][2].get_name(), branch_true_color)) print_fct(' '.join("%s" % c[2].get_name( ) for c in i.childs[1:]) + " ]%s" % normal_color) else: print_fct("%s[ " % branch_color + ' '.join( "%s" % c[2].get_name() for c in i.childs) + " ]%s" % normal_color) idx += ins.get_length() nb += 1 print_fct("\n") if i.get_exception_analysis(): print_fct("\t%s%s%s\n" % (exception_color, i.exception_analysis.show_buff(), normal_color)) print_fct("\n") def method2dot(mx, colors={}): """ Export analysis method to dot format @param mx : MethodAnalysis object @param colors : MethodAnalysis object @rtype : dot format buffer (it is a subgraph (dict)) """ colors = colors or { "true_branch": "green", "false_branch": "red", "default_branch": "purple", "jump_branch": "blue", "bg_idx": "lightgray", "idx": "blue", "bg_start_idx": "yellow", "bg_instruction": "lightgray", "instruction_name": "black", "instructions_operands": "yellow", "raw": "red", "string": "red", "literal": "green", "offset": "#4000FF", "method": "#DF3A01", "field": "#088A08", "type": "#0000FF", "registers_range": ("#999933", "#6666FF") } node_tpl = "\nstruct_%s [label=<\n<TABLE BORDER=\"0\" CELLBORDER=\"0\" CELLSPACING=\"3\">\n%s</TABLE>>];\n" label_tpl = "<TR><TD ALIGN=\"LEFT\" BGCOLOR=\"%s\"> <FONT FACE=\"Times-Bold\" color=\"%s\">%x</FONT> </TD><TD ALIGN=\"LEFT\" BGCOLOR=\"%s\"> <FONT FACE=\"Times-Bold\" color=\"%s\">%s </FONT> %s </TD></TR>\n" link_tpl = "<TR><TD PORT=\"%s\"></TD></TR>\n" edges_html = "" blocks_html = "" method = mx.get_method() sha256 = hashlib.sha256("%s%s%s" % ( mx.get_method().get_class_name(), mx.get_method().get_name(), mx.get_method().get_descriptor())).hexdigest() registers = {} if method.get_code(): for DVMBasicMethodBlock in mx.basic_blocks.gets(): for 
DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions( ): operands = DVMBasicMethodBlockInstruction.get_operands(0) for register in operands: if register[0] == 0: if register[1] not in registers: registers[register[1]] = 0 registers[register[1]] += 1 # for i in range(method.get_code().get_registers_size()): # registers[i] = 0 if registers: registers_colors = color_range(colors["registers_range"][0], colors["registers_range"][1], len(registers)) for i in registers: registers[i] = registers_colors.pop(0) new_links = [] for DVMBasicMethodBlock in mx.basic_blocks.gets(): ins_idx = DVMBasicMethodBlock.start block_id = hashlib.md5(sha256 + DVMBasicMethodBlock.get_name( )).hexdigest() content = link_tpl % 'header' for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions( ): if DVMBasicMethodBlockInstruction.get_op_value( ) == 0x2b or DVMBasicMethodBlockInstruction.get_op_value() == 0x2c: new_links.append((DVMBasicMethodBlock, ins_idx, DVMBasicMethodBlockInstruction.get_ref_off( ) * 2 + ins_idx)) elif DVMBasicMethodBlockInstruction.get_op_value() == 0x26: new_links.append((DVMBasicMethodBlock, ins_idx, DVMBasicMethodBlockInstruction.get_ref_off( ) * 2 + ins_idx)) operands = DVMBasicMethodBlockInstruction.get_operands(ins_idx) output = ", ".join(mx.get_vm().get_operand_html( i, registers, colors, escape, textwrap.wrap) for i in operands) formatted_operands = DVMBasicMethodBlockInstruction.get_formatted_operands( ) if formatted_operands: output += " ; %s" % str(formatted_operands) bg_idx = colors["bg_idx"] if ins_idx == 0 and "bg_start_idx" in colors: bg_idx = colors["bg_start_idx"] content += label_tpl % ( bg_idx, colors["idx"], ins_idx, colors["bg_instruction"], colors["instruction_name"], DVMBasicMethodBlockInstruction.get_name(), output) ins_idx += DVMBasicMethodBlockInstruction.get_length() last_instru = DVMBasicMethodBlockInstruction # all blocks from one method parsed # updating dot HTML content content += link_tpl % 'tail' 
blocks_html += node_tpl % (block_id, content) # Block edges color treatment (conditional branchs colors) val = colors["true_branch"] if len(DVMBasicMethodBlock.childs) > 1: val = colors["false_branch"] elif len(DVMBasicMethodBlock.childs) == 1: val = colors["jump_branch"] values = None if (last_instru.get_op_value() == 0x2b or last_instru.get_op_value() == 0x2c ) and len(DVMBasicMethodBlock.childs) > 1: val = colors["default_branch"] values = ["default"] values.extend(DVMBasicMethodBlock.get_special_ins( ins_idx - last_instru.get_length()).get_values()) # updating dot edges for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs: label_edge = "" if values: label_edge = values.pop(0) child_id = hashlib.md5( sha256 + DVMBasicMethodBlockChild[-1].get_name()).hexdigest() edges_html += "struct_%s:tail -> struct_%s:header [color=\"%s\", label=\"%s\"];\n" % ( block_id, child_id, val, label_edge) # color switch if val == colors["false_branch"]: val = colors["true_branch"] elif val == colors["default_branch"]: val = colors["true_branch"] exception_analysis = DVMBasicMethodBlock.get_exception_analysis() if exception_analysis: for exception_elem in exception_analysis.exceptions: exception_block = exception_elem[-1] if exception_block: exception_id = hashlib.md5( sha256 + exception_block.get_name()).hexdigest() edges_html += "struct_%s:tail -> struct_%s:header [color=\"%s\", label=\"%s\"];\n" % ( block_id, exception_id, "black", exception_elem[0]) for link in new_links: DVMBasicMethodBlock = link[0] DVMBasicMethodBlockChild = mx.basic_blocks.get_basic_block(link[2]) if DVMBasicMethodBlockChild: block_id = hashlib.md5(sha256 + DVMBasicMethodBlock.get_name( )).hexdigest() child_id = hashlib.md5(sha256 + DVMBasicMethodBlockChild.get_name( )).hexdigest() edges_html += "struct_%s:tail -> struct_%s:header [color=\"%s\", label=\"data(0x%x) to @0x%x\", style=\"dashed\"];\n" % ( block_id, child_id, "yellow", link[1], link[2]) method_label = method.get_class_name() + "." 
+ method.get_name( ) + "->" + method.get_descriptor() method_information = method.get_information() if method_information: method_label += "\\nLocal registers v%d ... v%d" % ( method_information["registers"][0], method_information["registers"][1]) if "params" in method_information: for register, rtype in method_information["params"]: method_label += "\\nparam v%d = %s" % (register, rtype) method_label += "\\nreturn = %s" % (method_information["return"]) return {'name': method_label, 'nodes': blocks_html, 'edges': edges_html} def method2format(output, _format="png", mx=None, raw=None): """ Export method to a specific file format @param output : output filename @param _format : format type (png, jpg ...) (default : png) @param mx : specify the MethodAnalysis object @param raw : use directly a dot raw buffer if None """ try: import pydot except ImportError: error("module pydot not found") buff = "digraph {\n" buff += "graph [rankdir=TB]\n" buff += "node [shape=plaintext]\n" if raw: data = raw else: data = method2dot(mx) # subgraphs cluster buff += "subgraph cluster_" + hashlib.md5(output).hexdigest( ) + " {\nlabel=\"%s\"\n" % data['name'] buff += data['nodes'] buff += "}\n" # subgraphs edges buff += data['edges'] buff += "}\n" d = pydot.graph_from_dot_data(buff) if d: getattr(d, "write_" + _format.lower())(output) def method2png(output, mx, raw=False): """ Export method to a png file format :param output: output filename :type output: string :param mx: specify the MethodAnalysis object :type mx: :class:`MethodAnalysis` object :param raw: use directly a dot raw buffer :type raw: string """ buff = raw if raw == False: buff = method2dot(mx) method2format(output, "png", mx, buff) def method2jpg(output, mx, raw=False): """ Export method to a jpg file format :param output: output filename :type output: string :param mx: specify the MethodAnalysis object :type mx: :class:`MethodAnalysis` object :param raw: use directly a dot raw buffer (optional) :type raw: string """ buff = 
raw if raw == False: buff = method2dot(mx) method2format(output, "jpg", mx, buff) def vm2json(vm): d = {} d["name"] = "root" d["children"] = [] for _class in vm.get_classes(): c_class = {} c_class["name"] = _class.get_name() c_class["children"] = [] for method in _class.get_methods(): c_method = {} c_method["name"] = method.get_name() c_method["children"] = [] c_class["children"].append(c_method) d["children"].append(c_class) return json.dumps(d) class TmpBlock(object): def __init__(self, name): self.name = name def get_name(self): return self.name def method2json(mx, directed_graph=False): if directed_graph: return method2json_direct(mx) return method2json_undirect(mx) def method2json_undirect(mx): d = {} reports = [] d["reports"] = reports for DVMBasicMethodBlock in mx.basic_blocks.gets(): cblock = {} cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name() cblock["registers"] = mx.get_method().get_code().get_registers_size() cblock["instructions"] = [] ins_idx = DVMBasicMethodBlock.start for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions( ): c_ins = {} c_ins["idx"] = ins_idx c_ins["name"] = DVMBasicMethodBlockInstruction.get_name() c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands( ins_idx) cblock["instructions"].append(c_ins) ins_idx += DVMBasicMethodBlockInstruction.get_length() cblock["Edge"] = [] for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs: cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name()) reports.append(cblock) return json.dumps(d) def method2json_direct(mx): d = {} reports = [] d["reports"] = reports hooks = {} l = [] for DVMBasicMethodBlock in mx.basic_blocks.gets(): for index, DVMBasicMethodBlockChild in enumerate( DVMBasicMethodBlock.childs): if DVMBasicMethodBlock.get_name( ) == DVMBasicMethodBlockChild[-1].get_name(): preblock = TmpBlock(DVMBasicMethodBlock.get_name() + "-pre") cnblock = {} cnblock["BasicBlockId"] = DVMBasicMethodBlock.get_name( ) + "-pre" cnblock["start"] = 
DVMBasicMethodBlock.start cnblock["notes"] = [] cnblock["Edge"] = [DVMBasicMethodBlock.get_name()] cnblock["registers"] = 0 cnblock["instructions"] = [] cnblock["info_bb"] = 0 l.append(cnblock) for parent in DVMBasicMethodBlock.fathers: hooks[parent[-1].get_name()] = [] hooks[parent[-1].get_name()].append(preblock) for idx, child in enumerate(parent[-1].childs): if child[-1].get_name() == DVMBasicMethodBlock.get_name( ): hooks[parent[-1].get_name()].append(child[-1]) for DVMBasicMethodBlock in mx.basic_blocks.gets(): cblock = {} cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name() cblock["start"] = DVMBasicMethodBlock.start cblock["notes"] = DVMBasicMethodBlock.get_notes() cblock["registers"] = mx.get_method().get_code().get_registers_size() cblock["instructions"] = [] ins_idx = DVMBasicMethodBlock.start last_instru = None for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions( ): c_ins = {} c_ins["idx"] = ins_idx c_ins["name"] = DVMBasicMethodBlockInstruction.get_name() c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands( ins_idx) c_ins["formatted_operands" ] = DVMBasicMethodBlockInstruction.get_formatted_operands() cblock["instructions"].append(c_ins) if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or DVMBasicMethodBlockInstruction.get_op_value() == 0x2c): values = DVMBasicMethodBlock.get_special_ins(ins_idx) cblock["info_next"] = values.get_values() ins_idx += DVMBasicMethodBlockInstruction.get_length() last_instru = DVMBasicMethodBlockInstruction cblock["info_bb"] = 0 if DVMBasicMethodBlock.childs: if len(DVMBasicMethodBlock.childs) > 1: cblock["info_bb"] = 1 if (last_instru.get_op_value() == 0x2b or last_instru.get_op_value() == 0x2c): cblock["info_bb"] = 2 cblock["Edge"] = [] for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs: ok = False if DVMBasicMethodBlock.get_name() in hooks: if DVMBasicMethodBlockChild[-1] in hooks[ DVMBasicMethodBlock.get_name() ]: ok = True 
cblock["Edge"].append(hooks[DVMBasicMethodBlock.get_name( )][0].get_name()) if not ok: cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name()) exception_analysis = DVMBasicMethodBlock.get_exception_analysis() if exception_analysis: cblock["Exceptions"] = exception_analysis.get() reports.append(cblock) reports.extend(l) return json.dumps(d) class SV(object): def __init__(self, size, buff): self.__size = size self.__value = unpack(self.__size, buff)[0] def _get(self): return pack(self.__size, self.__value) def __str__(self): return "0x%x" % self.__value def __int__(self): return self.__value def get_value_buff(self): return self._get() def get_value(self): return self.__value def set_value(self, attr): self.__value = attr class SVs(object): def __init__(self, size, ntuple, buff): self.__size = size self.__value = ntuple._make(unpack(self.__size, buff)) def _get(self): l = [] for i in self.__value._fields: l.append(getattr(self.__value, i)) return pack(self.__size, *l) def _export(self): return [x for x in self.__value._fields] def get_value_buff(self): return self._get() def get_value(self): return self.__value def set_value(self, attr): self.__value = self.__value._replace(**attr) def __str__(self): return self.__value.__str__() def object_to_str(obj): if isinstance(obj, str): return obj elif isinstance(obj, bool): return "" elif isinstance(obj, int): return pack("<L", obj) elif obj == None: return "" else: #print type(obj), obj return obj.get_raw() class MethodBC(object): def show(self, value): getattr(self, "show_" + value)() class BuffHandle(object): def __init__(self, buff): self.__buff = buff self.__idx = 0 def size(self): return len(self.__buff) def set_idx(self, idx): self.__idx = idx def get_idx(self): return self.__idx def readNullString(self, size): data = self.read(size) return data def read_b(self, size): return self.__buff[self.__idx:self.__idx + size] def read_at(self, offset, size): return self.__buff[offset:offset + size] def read(self, size): 
if isinstance(size, SV): size = size.value buff = self.__buff[self.__idx:self.__idx + size] self.__idx += size return buff def end(self): return self.__idx == len(self.__buff) class Buff(object): def __init__(self, offset, buff): self.offset = offset self.buff = buff self.size = len(buff) class _Bytecode(object): def __init__(self, buff): self.__buff = buff self.__idx = 0 def read(self, size): if isinstance(size, SV): size = size.value buff = self.__buff[self.__idx:self.__idx + size] self.__idx += size return buff def readat(self, off): if isinstance(off, SV): off = off.value return self.__buff[off:] def read_b(self, size): return self.__buff[self.__idx:self.__idx + size] def set_idx(self, idx): self.__idx = idx def get_idx(self): return self.__idx def add_idx(self, idx): self.__idx += idx def register(self, type_register, fct): self.__registers[type_register].append(fct) def get_buff(self): return self.__buff def length_buff(self): return len(self.__buff) def set_buff(self, buff): self.__buff = buff def save(self, filename): buff = self._save() with open(filename, "w") as fd: fd.write(buff) def FormatClassToJava(input): """ Transoform a typical xml format class into java format :param input: the input class name :rtype: string """ return "L" + input.replace(".", "/") + ";" def FormatClassToPython(input): i = input[:-1] i = i.replace("/", "_") i = i.replace("$", "_") return i def FormatNameToPython(input): i = input.replace("<", "") i = i.replace(">", "") i = i.replace("$", "_") return i def FormatDescriptorToPython(input): i = input.replace("/", "_") i = i.replace(";", "") i = i.replace("[", "") i = i.replace("(", "") i = i.replace(")", "") i = i.replace(" ", "") i = i.replace("$", "") return i class Node(object): def __init__(self, n, s): self.id = n self.title = s self.children = []
opendoor/django-gather
refs/heads/master
gather/migrations/0012_auto__add_field_location_slug.py
2
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Location.slug' db.add_column('gather_location', 'slug', self.gf('django.db.models.fields.CharField')(default='locationslug', max_length=60), keep_default=False) def backwards(self, orm): # Deleting field 'Location.slug' db.delete_column('gather_location', 'slug') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 
'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'core.location': { 'Meta': {'object_name': 'Location'}, 'about_page': ('django.db.models.fields.TextField', [], {}), 'address': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'default_from_email': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'email_subject_prefix': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'front_page_participate': ('django.db.models.fields.TextField', [], {}), 'front_page_stay': ('django.db.models.fields.TextField', [], {}), 'house_access_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'house_admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'house_admin'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}), 
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mailgun_api_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'mailgun_domain': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'max_reservation_days': ('django.db.models.fields.IntegerField', [], {'default': '14'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'residents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'residences'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}), 'short_description': ('django.db.models.fields.TextField', [], {}), 'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}), 'stay_page': ('django.db.models.fields.TextField', [], {}), 'stripe_public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'stripe_secret_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'welcome_email_days_ahead': ('django.db.models.fields.IntegerField', [], {'default': '2'}) }, 'gather.event': { 'Meta': {'object_name': 'Event'}, 'admin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': "orm['gather.EventAdminGroup']"}), 'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'events_attending'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events_created'", 'to': "orm['auth.User']"}), 'description': ('django.db.models.fields.TextField', [], {}), 'end': ('django.db.models.fields.DateTimeField', [], {}), 'endorsements': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'events_endorsed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}), 
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'limit': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Location']"}), 'notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizer_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'events_organized'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}), 'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'series': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': "orm['gather.EventSeries']"}), 'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}), 'start': ('django.db.models.fields.DateTimeField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'default': "'waiting for approval'", 'max_length': '200', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'where': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'gather.eventadmingroup': { 'Meta': {'object_name': 'EventAdminGroup'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Location']", 'null': 'True', 'blank': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}) }, 'gather.eventnotifications': { 'Meta': {'object_name': 'EventNotifications'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'reminders': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'event_notifications'", 'unique': 'True', 'to': "orm['auth.User']"}), 'weekly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'gather.eventseries': { 'Meta': {'object_name': 'EventSeries'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'gather.location': { 'Meta': {'object_name': 'Location'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '60'}) } } complete_apps = ['gather']
liamgh/liamgreenhughes-sl4a-tf101
refs/heads/master
python/src/Lib/test/test_pyexpat.py
51
# XXX TypeErrors on calling handlers, or on bad return values from a # handler, are obscure and unhelpful. import StringIO, sys import unittest import pyexpat from xml.parsers import expat from test.test_support import sortdict, run_unittest class SetAttributeTest(unittest.TestCase): def setUp(self): self.parser = expat.ParserCreate(namespace_separator='!') self.set_get_pairs = [ [0, 0], [1, 1], [2, 1], [0, 0], ] def test_returns_unicode(self): for x, y in self.set_get_pairs: self.parser.returns_unicode = x self.assertEquals(self.parser.returns_unicode, y) def test_ordered_attributes(self): for x, y in self.set_get_pairs: self.parser.ordered_attributes = x self.assertEquals(self.parser.ordered_attributes, y) def test_specified_attributes(self): for x, y in self.set_get_pairs: self.parser.specified_attributes = x self.assertEquals(self.parser.specified_attributes, y) data = '''\ <?xml version="1.0" encoding="iso-8859-1" standalone="no"?> <?xml-stylesheet href="stylesheet.css"?> <!-- comment data --> <!DOCTYPE quotations SYSTEM "quotations.dtd" [ <!ELEMENT root ANY> <!NOTATION notation SYSTEM "notation.jpeg"> <!ENTITY acirc "&#226;"> <!ENTITY external_entity SYSTEM "entity.file"> <!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation> %unparsed_entity; ]> <root attr1="value1" attr2="value2&#8000;"> <myns:subelement xmlns:myns="http://www.python.org/namespace"> Contents of subelements </myns:subelement> <sub2><![CDATA[contents of CDATA section]]></sub2> &external_entity; </root> ''' # Produce UTF-8 output class ParseTest(unittest.TestCase): class Outputter: def __init__(self): self.out = [] def StartElementHandler(self, name, attrs): self.out.append('Start element: ' + repr(name) + ' ' + sortdict(attrs)) def EndElementHandler(self, name): self.out.append('End element: ' + repr(name)) def CharacterDataHandler(self, data): data = data.strip() if data: self.out.append('Character data: ' + repr(data)) def ProcessingInstructionHandler(self, target, data): 
self.out.append('PI: ' + repr(target) + ' ' + repr(data)) def StartNamespaceDeclHandler(self, prefix, uri): self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri)) def EndNamespaceDeclHandler(self, prefix): self.out.append('End of NS decl: ' + repr(prefix)) def StartCdataSectionHandler(self): self.out.append('Start of CDATA section') def EndCdataSectionHandler(self): self.out.append('End of CDATA section') def CommentHandler(self, text): self.out.append('Comment: ' + repr(text)) def NotationDeclHandler(self, *args): name, base, sysid, pubid = args self.out.append('Notation declared: %s' %(args,)) def UnparsedEntityDeclHandler(self, *args): entityName, base, systemId, publicId, notationName = args self.out.append('Unparsed entity decl: %s' %(args,)) def NotStandaloneHandler(self, userData): self.out.append('Not standalone') return 1 def ExternalEntityRefHandler(self, *args): context, base, sysId, pubId = args self.out.append('External entity ref: %s' %(args[1:],)) return 1 def DefaultHandler(self, userData): pass def DefaultHandlerExpand(self, userData): pass handler_names = [ 'StartElementHandler', 'EndElementHandler', 'CharacterDataHandler', 'ProcessingInstructionHandler', 'UnparsedEntityDeclHandler', 'NotationDeclHandler', 'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler', 'CommentHandler', 'StartCdataSectionHandler', 'EndCdataSectionHandler', 'DefaultHandler', 'DefaultHandlerExpand', #'NotStandaloneHandler', 'ExternalEntityRefHandler' ] def test_utf8(self): out = self.Outputter() parser = expat.ParserCreate(namespace_separator='!') for name in self.handler_names: setattr(parser, name, getattr(out, name)) parser.returns_unicode = 0 parser.Parse(data, 1) # Verify output op = out.out self.assertEquals(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'') self.assertEquals(op[1], "Comment: ' comment data '") self.assertEquals(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)") self.assertEquals(op[3], "Unparsed entity decl: 
('unparsed_entity', None, 'entity.file', None, 'notation')") self.assertEquals(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}") self.assertEquals(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'") self.assertEquals(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}") self.assertEquals(op[7], "Character data: 'Contents of subelements'") self.assertEquals(op[8], "End element: 'http://www.python.org/namespace!subelement'") self.assertEquals(op[9], "End of NS decl: 'myns'") self.assertEquals(op[10], "Start element: 'sub2' {}") self.assertEquals(op[11], 'Start of CDATA section') self.assertEquals(op[12], "Character data: 'contents of CDATA section'") self.assertEquals(op[13], 'End of CDATA section') self.assertEquals(op[14], "End element: 'sub2'") self.assertEquals(op[15], "External entity ref: (None, 'entity.file', None)") self.assertEquals(op[16], "End element: 'root'") def test_unicode(self): # Try the parse again, this time producing Unicode output out = self.Outputter() parser = expat.ParserCreate(namespace_separator='!') parser.returns_unicode = 1 for name in self.handler_names: setattr(parser, name, getattr(out, name)) parser.Parse(data, 1) op = out.out self.assertEquals(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') self.assertEquals(op[1], "Comment: u' comment data '") self.assertEquals(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") self.assertEquals(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") self.assertEquals(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") self.assertEquals(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") self.assertEquals(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") self.assertEquals(op[7], "Character data: u'Contents of subelements'") self.assertEquals(op[8], "End element: 
u'http://www.python.org/namespace!subelement'") self.assertEquals(op[9], "End of NS decl: u'myns'") self.assertEquals(op[10], "Start element: u'sub2' {}") self.assertEquals(op[11], 'Start of CDATA section') self.assertEquals(op[12], "Character data: u'contents of CDATA section'") self.assertEquals(op[13], 'End of CDATA section') self.assertEquals(op[14], "End element: u'sub2'") self.assertEquals(op[15], "External entity ref: (None, u'entity.file', None)") self.assertEquals(op[16], "End element: u'root'") def test_parse_file(self): # Try parsing a file out = self.Outputter() parser = expat.ParserCreate(namespace_separator='!') parser.returns_unicode = 1 for name in self.handler_names: setattr(parser, name, getattr(out, name)) file = StringIO.StringIO(data) parser.ParseFile(file) op = out.out self.assertEquals(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'') self.assertEquals(op[1], "Comment: u' comment data '") self.assertEquals(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)") self.assertEquals(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')") self.assertEquals(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}") self.assertEquals(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'") self.assertEquals(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}") self.assertEquals(op[7], "Character data: u'Contents of subelements'") self.assertEquals(op[8], "End element: u'http://www.python.org/namespace!subelement'") self.assertEquals(op[9], "End of NS decl: u'myns'") self.assertEquals(op[10], "Start element: u'sub2' {}") self.assertEquals(op[11], 'Start of CDATA section') self.assertEquals(op[12], "Character data: u'contents of CDATA section'") self.assertEquals(op[13], 'End of CDATA section') self.assertEquals(op[14], "End element: u'sub2'") self.assertEquals(op[15], "External entity ref: (None, u'entity.file', None)") 
self.assertEquals(op[16], "End element: u'root'") class NamespaceSeparatorTest(unittest.TestCase): def test_legal(self): # Tests that make sure we get errors when the namespace_separator value # is illegal, and that we don't for good values: expat.ParserCreate() expat.ParserCreate(namespace_separator=None) expat.ParserCreate(namespace_separator=' ') def test_illegal(self): try: expat.ParserCreate(namespace_separator=42) self.fail() except TypeError, e: self.assertEquals(str(e), 'ParserCreate() argument 2 must be string or None, not int') try: expat.ParserCreate(namespace_separator='too long') self.fail() except ValueError, e: self.assertEquals(str(e), 'namespace_separator must be at most one character, omitted, or None') def test_zero_length(self): # ParserCreate() needs to accept a namespace_separator of zero length # to satisfy the requirements of RDF applications that are required # to simply glue together the namespace URI and the localname. Though # considered a wart of the RDF specifications, it needs to be supported. # # See XML-SIG mailing list thread starting with # http://mail.python.org/pipermail/xml-sig/2001-April/005202.html # expat.ParserCreate(namespace_separator='') # too short class InterningTest(unittest.TestCase): def test(self): # Test the interning machinery. p = expat.ParserCreate() L = [] def collector(name, *args): L.append(name) p.StartElementHandler = collector p.EndElementHandler = collector p.Parse("<e> <e/> <e></e> </e>", 1) tag = L[0] self.assertEquals(len(L), 6) for entry in L: # L should have the same string repeated over and over. 
self.assertTrue(tag is entry) class BufferTextTest(unittest.TestCase): def setUp(self): self.stuff = [] self.parser = expat.ParserCreate() self.parser.buffer_text = 1 self.parser.CharacterDataHandler = self.CharacterDataHandler def check(self, expected, label): self.assertEquals(self.stuff, expected, "%s\nstuff = %r\nexpected = %r" % (label, self.stuff, map(unicode, expected))) def CharacterDataHandler(self, text): self.stuff.append(text) def StartElementHandler(self, name, attrs): self.stuff.append("<%s>" % name) bt = attrs.get("buffer-text") if bt == "yes": self.parser.buffer_text = 1 elif bt == "no": self.parser.buffer_text = 0 def EndElementHandler(self, name): self.stuff.append("</%s>" % name) def CommentHandler(self, data): self.stuff.append("<!--%s-->" % data) def setHandlers(self, handlers=[]): for name in handlers: setattr(self.parser, name, getattr(self, name)) def test_default_to_disabled(self): parser = expat.ParserCreate() self.assertFalse(parser.buffer_text) def test_buffering_enabled(self): # Make sure buffering is turned on self.assertTrue(self.parser.buffer_text) self.parser.Parse("<a>1<b/>2<c/>3</a>", 1) self.assertEquals(self.stuff, ['123'], "buffered text not properly collapsed") def test1(self): # XXX This test exposes more detail of Expat's text chunking than we # XXX like, but it tests what we need to concisely. 
self.setHandlers(["StartElementHandler"]) self.parser.Parse("<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1) self.assertEquals(self.stuff, ["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"], "buffering control not reacting as expected") def test2(self): self.parser.Parse("<a>1<b/>&lt;2&gt;<c/>&#32;\n&#x20;3</a>", 1) self.assertEquals(self.stuff, ["1<2> \n 3"], "buffered text not properly collapsed") def test3(self): self.setHandlers(["StartElementHandler"]) self.parser.Parse("<a>1<b/>2<c/>3</a>", 1) self.assertEquals(self.stuff, ["<a>", "1", "<b>", "2", "<c>", "3"], "buffered text not properly split") def test4(self): self.setHandlers(["StartElementHandler", "EndElementHandler"]) self.parser.CharacterDataHandler = None self.parser.Parse("<a>1<b/>2<c/>3</a>", 1) self.assertEquals(self.stuff, ["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"]) def test5(self): self.setHandlers(["StartElementHandler", "EndElementHandler"]) self.parser.Parse("<a>1<b></b>2<c/>3</a>", 1) self.assertEquals(self.stuff, ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"]) def test6(self): self.setHandlers(["CommentHandler", "EndElementHandler", "StartElementHandler"]) self.parser.Parse("<a>1<b/>2<c></c>345</a> ", 1) self.assertEquals(self.stuff, ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"], "buffered text not properly split") def test7(self): self.setHandlers(["CommentHandler", "EndElementHandler", "StartElementHandler"]) self.parser.Parse("<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1) self.assertEquals(self.stuff, ["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "<!--abc-->", "4", "<!--def-->", "5", "</a>"], "buffered text not properly split") # Test handling of exception from callback: class HandlerExceptionTest(unittest.TestCase): def StartElementHandler(self, name, attrs): raise RuntimeError(name) def test(self): parser = expat.ParserCreate() parser.StartElementHandler = self.StartElementHandler try: parser.Parse("<a><b><c/></b></a>", 1) 
self.fail() except RuntimeError, e: self.assertEquals(e.args[0], 'a', "Expected RuntimeError for element 'a', but" + \ " found %r" % e.args[0]) # Test Current* members: class PositionTest(unittest.TestCase): def StartElementHandler(self, name, attrs): self.check_pos('s') def EndElementHandler(self, name): self.check_pos('e') def check_pos(self, event): pos = (event, self.parser.CurrentByteIndex, self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber) self.assertTrue(self.upto < len(self.expected_list), 'too many parser events') expected = self.expected_list[self.upto] self.assertEquals(pos, expected, 'Expected position %s, got position %s' %(pos, expected)) self.upto += 1 def test(self): self.parser = expat.ParserCreate() self.parser.StartElementHandler = self.StartElementHandler self.parser.EndElementHandler = self.EndElementHandler self.upto = 0 self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2), ('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)] xml = '<a>\n <b>\n <c/>\n </b>\n</a>' self.parser.Parse(xml, 1) class sf1296433Test(unittest.TestCase): def test_parse_only_xml_data(self): # http://python.org/sf/1296433 # xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025) # this one doesn't crash #xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000) class SpecificException(Exception): pass def handler(text): raise SpecificException parser = expat.ParserCreate() parser.CharacterDataHandler = handler self.assertRaises(Exception, parser.Parse, xml) class ChardataBufferTest(unittest.TestCase): """ test setting of chardata buffer size """ def test_1025_bytes(self): self.assertEquals(self.small_buffer_test(1025), 2) def test_1000_bytes(self): self.assertEquals(self.small_buffer_test(1000), 1) def test_wrong_size(self): parser = expat.ParserCreate() parser.buffer_text = 1 def f(size): parser.buffer_size = size self.assertRaises(TypeError, f, sys.maxint+1) self.assertRaises(ValueError, f, -1) self.assertRaises(ValueError, f, 
0) def test_unchanged_size(self): xml1 = ("<?xml version='1.0' encoding='iso8859'?><s>%s" % ('a' * 512)) xml2 = 'a'*512 + '</s>' parser = expat.ParserCreate() parser.CharacterDataHandler = self.counting_handler parser.buffer_size = 512 parser.buffer_text = 1 # Feed 512 bytes of character data: the handler should be called # once. self.n = 0 parser.Parse(xml1) self.assertEquals(self.n, 1) # Reassign to buffer_size, but assign the same size. parser.buffer_size = parser.buffer_size self.assertEquals(self.n, 1) # Try parsing rest of the document parser.Parse(xml2) self.assertEquals(self.n, 2) def test_disabling_buffer(self): xml1 = "<?xml version='1.0' encoding='iso8859'?><a>%s" % ('a' * 512) xml2 = ('b' * 1024) xml3 = "%s</a>" % ('c' * 1024) parser = expat.ParserCreate() parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 1024 self.assertEquals(parser.buffer_size, 1024) # Parse one chunk of XML self.n = 0 parser.Parse(xml1, 0) self.assertEquals(parser.buffer_size, 1024) self.assertEquals(self.n, 1) # Turn off buffering and parse the next chunk. 
parser.buffer_text = 0 self.assertFalse(parser.buffer_text) self.assertEquals(parser.buffer_size, 1024) for i in range(10): parser.Parse(xml2, 0) self.assertEquals(self.n, 11) parser.buffer_text = 1 self.assertTrue(parser.buffer_text) self.assertEquals(parser.buffer_size, 1024) parser.Parse(xml3, 1) self.assertEquals(self.n, 12) def make_document(self, bytes): return ("<?xml version='1.0'?><tag>" + bytes * 'a' + '</tag>') def counting_handler(self, text): self.n += 1 def small_buffer_test(self, buffer_len): xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * buffer_len) parser = expat.ParserCreate() parser.CharacterDataHandler = self.counting_handler parser.buffer_size = 1024 parser.buffer_text = 1 self.n = 0 parser.Parse(xml) return self.n def test_change_size_1(self): xml1 = "<?xml version='1.0' encoding='iso8859'?><a><s>%s" % ('a' * 1024) xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025) parser = expat.ParserCreate() parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 1024 self.assertEquals(parser.buffer_size, 1024) self.n = 0 parser.Parse(xml1, 0) parser.buffer_size *= 2 self.assertEquals(parser.buffer_size, 2048) parser.Parse(xml2, 1) self.assertEquals(self.n, 2) def test_change_size_2(self): xml1 = "<?xml version='1.0' encoding='iso8859'?><a>a<s>%s" % ('a' * 1023) xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025) parser = expat.ParserCreate() parser.CharacterDataHandler = self.counting_handler parser.buffer_text = 1 parser.buffer_size = 2048 self.assertEquals(parser.buffer_size, 2048) self.n=0 parser.Parse(xml1, 0) parser.buffer_size /= 2 self.assertEquals(parser.buffer_size, 1024) parser.Parse(xml2, 1) self.assertEquals(self.n, 4) def test_main(): run_unittest(SetAttributeTest, ParseTest, NamespaceSeparatorTest, InterningTest, BufferTextTest, HandlerExceptionTest, PositionTest, sf1296433Test, ChardataBufferTest) if __name__ == "__main__": test_main()
flipreverse/systemtap-android-src
refs/heads/android
scripts/kprobes_test/kprobes_test.py
15
#!/usr/bin/python # Copyright (C) 2010 Red Hat Inc. # # This file is part of systemtap, and is free software. You can # redistribute it and/or modify it under the terms of the GNU General # Public License (GPL); either version 2, or (at your option) any # later version. import os import pickle import re import sys import time from config_opts import config_opts from gen_code import gen_module from run_module import run_module class BucketSet(object): def __init__(self, bucketA=list(), bucketA_result=0, bucketB=list(), bucketB_result=0, buckets=list(), passed=list(), failed=list(), untriggered=list(), unregistered=list(), split=0): # bucketA is the list of probes to test. self.bucketA = bucketA # bucketA_result tells us what state bucketA is in self.bucketA_result = bucketA_result # bucketB is the 2nd (optional) list of probes to test self.bucketB = bucketB # bucketB_result tells us what state bucketB is in self.bucketB_result = bucketB_result # buckets is the full list of probes to test (originally # filled in by reading config_opts['probes_all']) self.buckets = buckets # passed is the list of probe points that were sucessfully # registered and triggered (actually called) self.passed = passed # failed is the list of probe points that were sucessfully # registered but caused a crash. The probe lists have been # bisected and singly eliminated. self.failed = failed # untriggered is the list of probe points that were sucessfully # registered and but never triggered (actually called) self.untriggered = untriggered # untriggered is the list of probe points that couldn't be # registered self.unregistered = unregistered # split tells us how to split a bucket. 0 means bisect # it, anything else is the number of times we've tried to do a # 1-by-1 split. 
self.split = split def bucketA_result_str(self): if self.bucketA_result == 0: return "untested" elif self.bucketA_result == 1: return "succeeded" elif self.bucketA_result == 2: return "failed" else: return "UNKNOWN" def bucketB_result_str(self): if self.bucketB_result == 0: return "untested" elif self.bucketB_result == 1: return "succeeded" elif self.bucketB_result == 2: return "failed" else: return "UNKNOWN" probes = BucketSet() # Install this script in config_opts['rclocal'] (typically # /etc/rc.d/rc.local). Returns true if the line wasn't already there # (which means this is the 1st time the script has been run). def register_script(install=True): rc = True regexp = re.compile("^.+/kprobes_test.py") f = open(config_opts['rclocal'], "r+") data = "" for line in f: if not regexp.match(line): data += line else: rc = False if install: data += "cd %s/ && ./kprobes_test.py &" % os.getcwd() f.seek(0, 0) f.write(data) f.truncate(f.tell()) f.close() return rc # Because it is very possible that we'll crash the system, we need to # make sure our data files are written to the disk sucessfully before # we run a module. sync_disks() makes sure everything written is # actually saved. def sync_disks(): os.system("sync; sync; sync") time.sleep(5) return def read_probe_list(): global probes # If the "pickled" file exists, read it in to recover our state if os.path.exists(config_opts['probes_db']): print "Reading state..." f = open(config_opts['probes_db']) p = pickle.Unpickler(f) probes = p.load() f.close() # if the 'probes_all' file exists, create the data from it elif os.path.exists(config_opts['probes_all']): # Read in the flat file f = open(config_opts['probes_all']) probe_lines = f.readlines() f.close() # Create the full bucket full_bucket = list() for line in probe_lines: full_bucket.append(line.rstrip()) probes.buckets.append(full_bucket) # create the probes_all file? 
else: print >>sys.stderr, ("Could not find probes file") sys.exit(1) def write_probe_list(): global probes print "Writing state..." f = open(config_opts['probes_db'], 'w') p = pickle.Pickler(f) p.dump(probes) f.close() def display_probe_list(): global probes i = 0 print "bucketA (%s) has %d entries" % \ (probes.bucketA_result_str(), len(probes.bucketA)) print "bucketB (%s) has %d entries" % \ (probes.bucketB_result_str(), len(probes.bucketB)) for bucket in probes.buckets: print "set %d has %d entries" % (i, len(bucket)) i += 1 print "passed set has %d entries" % len(probes.passed) total = 0 for bucket in probes.failed: total += len(bucket) print "failed set has %d entries (in %d buckets)" % \ (total, len(probes.failed)) print "untriggered set has %d entries" % len(probes.untriggered) print "unregistered set has %d entries" % len(probes.unregistered) def reset_buckets(): global probes probes.bucketA = list() probes.bucketA_result = 0 probes.bucketB = list() probes.bucketB_result = 0 probes.split = 0 def grab_bucket(): global probes reset_buckets() # Try to grab the 1st bucket from the list. if len(probes.buckets) > 0: bucket = probes.buckets[0] del probes.buckets[0] # if the bucket has more than 1000 probes, limit it to 1000 if len(bucket) > 1000: probes.bucketA = bucket[0:1000] probes.bucketA_result = 0 probes.bucketB_result = 0 rest = bucket[1000:] probes.buckets.insert(0, rest) # otherwise just use the bucket else: probes.bucketA = bucket return True else: print "no buckets left" return False def split_bucket(bucket): global probes if probes.split == 0: split = len(bucket) / 2 else: split = 1 bucketA = bucket[0:split] bucketB = bucket[split:] probes.bucketA = bucketA probes.bucketA_result = 0 probes.bucketB = bucketB probes.bucketB_result = 0 def update_buckets(failed=True): global probes ret = True # if we don't have a current set, get one if len(probes.bucketA) == 0: ret = grab_bucket() # if bucketA is set, we've just finished up with it (or bucketB). 
elif len(probes.bucketA) > 0: if probes.bucketA_result == 0: if not failed: probes.bucketA_result = 1 else: probes.bucketA_result = 2 print "bucketA %s with %d probes..." % \ (probes.bucketA_result_str(), len(probes.bucketA)) else: if not failed: probes.bucketB_result = 1 else: probes.bucketB_result = 2 print "bucketB %s with %d probes..." % \ (probes.bucketB_result_str(), len(probes.bucketB)) # OK, we've got several cases here. # (1) Only bucketA was set. if len(probes.bucketB) == 0: print "case (1)..." # (1a) If bucketA passed, put it on the passed list and grab # the next bucket. if not failed: probes.passed.extend(probes.bucketA) probes.split = 0 ret = grab_bucket() # (1b) If bucketA failed and is more than 1 probe, split it. elif len(probes.bucketA) > 1: split_bucket(probes.bucketA) # (1c) if bucketA failed and was only 1 probe, put it on # the failed list and grab the next bucket. else: probes.failed.append(probes.bucketA) ret = grab_bucket() # (2) Both bucketA and bucketB were set, but bucketB hasn't # been tested yet. elif probes.bucketB_result == 0: # Do nothing and let bucketB be tested. print "case (2)..." pass # (3) Both bucketA and bucketB have been tested. Figure out # what to do next. else: print "case (3)..." # (3a) bucketA passed, bucketB failed. Move bucketA to # the passed list and split bucketB. if probes.bucketA_result == 1 and probes.bucketB_result == 2: print "case (3a)..." probes.passed.extend(probes.bucketA) probes.split = 0 # (3a1) If bucketB failed and is more than 1 probe, # split it. if len(probes.bucketB) > 1: split_bucket(probes.bucketB) # (3a2) If bucketB failed and was only 1 probe, put it # on the failed list and grab the next bucket. else: probes.failed.append(probes.bucketB) ret = grab_bucket() # (3b) bucketA failed, bucketB passed. Move bucketB to # the passed list and split bucketA. elif probes.bucketA_result == 2 and probes.bucketB_result == 1: print "case (3b)..." 
probes.passed.extend(probes.bucketB) probes.split = 0 # (3b1) If bucketA failed and is more than 1 probe, # split it. if len(probes.bucketA) > 1: split_bucket(probes.bucketA) # (3b2) If bucketA failed and was only 1 probe, put it # on the failed list and grab the next bucket. else: probes.failed.append(probes.bucketA) ret = grab_bucket() # (3c) Both buckets failed. elif probes.bucketA_result == 2 and probes.bucketB_result == 2: print "case (3c)..." # (3c1) bucketA and bucketB were both just 1 probe # each. Put them both on the failed list and grab the # next bucket. if len(probes.bucketA) == 1 and len(probes.bucketB) == 1: print "case (3c1)..." probes.failed.append(probes.bucketA) probes.failed.append(probes.bucketB) ret = grab_bucket() # (3c2) bucketA was just 1 probe, but bucketB was more # than 1 probe elif len(probes.bucketA) == 1 and len(probes.bucketB) > 1: print "case (3c2)..." probes.failed.append(probes.bucketA) split_bucket(probes.bucketB) # (3c3) bucketA was more than 1 probe, but bucketB was # just 1 probe elif len(probes.bucketA) > 1 and len(probes.bucketB) == 1: print "case (3c3)..." probes.failed.append(probes.bucketB) split_bucket(probes.bucketA) # (3c4) Both buckets were more than 1 probe. In this # case, split bucketA and put bucketB back on the main # list. This will cause bucketB to get tested twice. else: print "case (3c4)..." probes.buckets.insert(0, probes.bucketB) split_bucket(probes.bucketA) # (3d) Both buckets passed. This sounds good, but this # means that together bucketA and bucketB failed, but # separately they passed. So, the combination of the sets # is the problem. else: print "case (3d)..." # (3d1) If both buckets are just 1 probe, combine the # buckets into one and put it on the failed list. # Then grab the next bucket. 
if len(probes.bucketA) == 1 and len(probes.bucketB) == 1: bucket = probes.bucketA + probes.bucketB probes.failed.append(bucket) ret = grab_bucket() # (3d2) Combine both buckets here, and try eliminating # the probes 1-by-1. We'll go back to bisecting if we # can remove 1 probe from this list or the list is # indivisible (case 3d3a). elif probes.split == 0: probes.split = 1 bucket = probes.bucketA + probes.bucketB split_bucket(bucket) # (3d3) We're eliminating the probes 1-by-1 and the # last attempt failed. Reverse the order and try # again. else: probes.split += 1 print "case (3d3): bucketA(%d) bucketB(%d)" % (len(probes.bucketA), len(probes.bucketB)) bucket = probes.bucketB + probes.bucketA print "case (3d3): combined bucket(%d)" % (len(bucket)) if len(bucket) > 500: print >>sys.stderr, "Error: bucket grew?" return -1 # (3d3a) If we've tried every probe singly, and # they all still worked, the combination is still # the problem. So, we're done trying 1-by-1 # elimination. if probes.split > len(bucket): probes.split = 0 probes.failed.append(bucket) ret = grab_bucket() # (3d3b) Keep trying 1-by-1 splits. else: split_bucket(bucket) write_probe_list() return ret def save_bucket(): global probes f = open(config_opts['probes_current'], 'w') if len(probes.bucketA) > 0 and probes.bucketA_result == 0: f.write("\n".join(probes.bucketA)) elif len(probes.bucketB) > 0 and probes.bucketB_result == 0: f.write("\n".join(probes.bucketB)) else: print "Error: no bucket to write?" f.close() def parse_module_output(): global probes # Parse the output file, looking for probe points pp_re = re.compile(": (-?\d+) (\S+)$") f = open(config_opts['probes_result'], 'r') pp = dict() line = f.readline() while line: match = pp_re.search(line) if match: pp[match.group(2)] = int(match.group(1)) line = f.readline() f.close() if len(pp.keys()) == 0: print >>sys.stderr, "No data found?" 
return 1 # We're done with the result file os.unlink(config_opts['probes_result']) # Parse the list of probe points. Since the result fields haven't # been updated yet, pick the 1st bucket that has a status of 0 # (untested). if probes.bucketA_result == 0: bucket = probes.bucketA else: bucket = probes.bucketB new_bucket = list() for probe in bucket: if pp.has_key(probe): # > 0 == passed (registered and triggered) if pp[probe] > 0: new_bucket.append(probe) # 0 == untriggered elif pp[probe] == 0: probes.untriggered.append(probe) # -1 == unregistered elif pp[probe] == -1: probes.unregistered.append(probe) else: print >>sys.stderr, "failed probe %s?" % probe else: print >>sys.stderr, "Couldn't find %s?" % probe # OK, we've gone through and removed all the # untriggered/unregistered probes from bucket. Update the # proper bucket. if probes.bucketA_result == 0: # Oops, all the probes were unregistered/untriggered. if len(new_bucket) == 0: # If we've got a bucketB, we'll think it just succeeded. # So, put it back on the bucket list so it will get # correctly processed. if len(probes.bucketB): print "Re-inserting bucketB..." probes.buckets.insert(0, probes.bucketB) # Reset everything reset_buckets() else: probes.bucketA = new_bucket else: # If bucketB ended up with 0 entries, we need to handle # bucketA. if len(new_bucket) == 0: # bucketA suceeded and bucketB ended up with 0 entries. # Put bucketA on the 'passed' list. if probes.bucketA_result == 1: probes.passed.extend(probes.bucketA) # bucketA failed and bucketB ended up with 0 entries. # Put bucketA on the bucket list to get processed again. # This will cause it to get tested twice, but it is the # easiest way out. else: print "Re-inserting bucketA..." probes.buckets.insert(0, probes.bucketA) # Reset everything. 
reset_buckets() else: probes.bucketB = new_bucket return 0 def run_tests(): status = True failed = True while status: status = update_buckets(failed) display_probe_list() if not status: break # Generate the module. save_bucket() rc = gen_module() if rc != 0: sys.exit(rc) # Run the module. sync_disks() rc = run_module() if rc != 0: sys.exit(rc) # Parse the module output. rc = parse_module_output() if rc != 0: sys.exit(rc) # If we're here, the current module was loaded and unloaded # successfully. failed = False def dump_output(): global probes f = open(config_opts['probes_passed'], 'w') if len(probes.passed) > 0: f.write("\n".join(probes.passed)) f.close() # probes.failed is a list of lists. f = open(config_opts['probes_failed'], 'w') for bucket in probes.failed: f.write("\n".join(bucket)) f.write("\n#\n") f.close() f = open(config_opts['probes_untriggered'], 'w') if len(probes.untriggered) > 0: f.write("\n".join(probes.untriggered)) f.close() f = open(config_opts['probes_unregistered'], 'w') if len(probes.unregistered) > 0: f.write("\n".join(probes.unregistered)) f.close() # Make sure we're running as root. if os.getuid() != 0: print >>sys.stderr, "Error: this script must be run by root" sys.exit(1) # Register this script. If this is the 1st time we've been run, start # from scratch by removing old state files. if register_script(): print >>sys.stderr, "Removing old state files..." if os.path.exists(config_opts['log_file']): os.unlink(config_opts['log_file']) if os.path.exists(config_opts['probes_db']): os.unlink(config_opts['probes_db']) # Redirect stdout and stderr sys.stdout.flush() sys.stderr.flush() so = open(config_opts['log_file'], 'a+', 0) # no buffering os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(so.fileno(), sys.stderr.fileno()) # Go! read_probe_list() run_tests() # Finish up. dump_output() register_script(False) print >>sys.stderr, "Finished."
jones139/ChEWS
refs/heads/master
svgwrite/data/svgparser.py
3
#!/usr/bin/env python #coding:utf-8 # Author: mozman --<mozman@gmx.at> # Purpose: svgparser using pyparser # Created: 16.10.2010 # Copyright (C) 2010, Manfred Moitzi # License: MIT License # depends on: pyparsing.py by Paul T. McGuire - http://pyparsing.wikispaces.com/ __all__ = ["is_valid_transferlist", "is_valid_pathdata", "is_valid_animation_timing"] import sys from pyparsing import * from functools import partial event_names = [ "focusin", "focusout", "activate", "click", "mousedown", "mouseup", "mouseover", "mousemove", "mouseout", "DOMSubtreeModified", "DOMNodeInserted", "DOMNodeRemoved", "DOMNodeRemovedFromDocument", "DOMNodeInsertedtoDocument", "DOMAttrModified", "DOMCharacterDataModified", "SVGLoad", "SVGUnload", "SVGAbort", "SVGError", "SVGResize", "SVGScroll", "SVGZoom", "beginEvent", "endEvent", "repeatEvent", ] sign = oneOf('+ -') comma = Literal(',') * (0, 1) # zero or one ',' semicolon = Literal(';') * (0, 1) # zero or one ';' integer_constant = Word(nums) exponent = CaselessLiteral('E') + Optional(sign) + integer_constant fractional_constant = Combine(Optional(integer_constant) + '.' 
+ integer_constant) \ ^ Combine(integer_constant + '.') scientific_constant = Combine(fractional_constant + Optional(exponent)) \ ^ Combine(integer_constant + exponent) number = Combine(Optional(sign) + integer_constant) \ ^ Combine(Optional(sign) + scientific_constant) def has_valid_syntax(term, parser): try: parser.parseString(term, parseAll=True) return True except ParseException: return False def build_transferlist_parser(): matrix = Literal("matrix") + '(' + number + (Suppress(comma) + number) * 5 + ')' translate = Literal("translate") + '(' + number + Optional(comma + number) + ')' scale = Literal("scale") + '(' + number + Optional(comma + number) + ')' rotate = Literal("rotate") + '(' + number + Optional(comma + number + comma + number) + ')' skewX = Literal("skewX") + '(' + number + ')' skewY = Literal("skewY") + '(' + number + ')' transform = matrix | translate | scale | rotate | skewX | skewY return transform + ZeroOrMore(comma + transform) transferlist_parser = build_transferlist_parser() is_valid_transferlist = partial(has_valid_syntax, parser=transferlist_parser) def build_pathdata_parser(): coordinate = number coordinate_pair = coordinate + comma + coordinate nonnegative_number = integer_constant ^ scientific_constant flag = oneOf('0 1') comma_delimited_coordinates = coordinate + ZeroOrMore(comma + coordinate) comma_delimited_coordinate_pairs = coordinate_pair + ZeroOrMore(comma + coordinate_pair) closepath = oneOf('Z z') moveto = oneOf('M m') + comma_delimited_coordinate_pairs lineto = oneOf('L l') + comma_delimited_coordinate_pairs horizontal_lineto = oneOf('H h') + comma_delimited_coordinates vertical_lineto = oneOf('V v') + comma_delimited_coordinates curveto_argument_sequence = coordinate_pair + comma + coordinate_pair + comma + coordinate_pair curveto = oneOf('C c') + curveto_argument_sequence + ZeroOrMore(comma + curveto_argument_sequence) smooth_curveto_argument_sequence = coordinate_pair + comma + coordinate_pair smooth_curveto = oneOf('S s') 
+ smooth_curveto_argument_sequence \ + ZeroOrMore(comma + smooth_curveto_argument_sequence) quadratic_bezier_curveto_argument_sequence = coordinate_pair + comma + coordinate_pair quadratic_bezier_curveto = oneOf('Q q') + quadratic_bezier_curveto_argument_sequence \ + ZeroOrMore(comma + quadratic_bezier_curveto_argument_sequence) smooth_quadratic_bezier_curveto = oneOf('T t') + coordinate_pair \ + ZeroOrMore(comma + coordinate_pair) elliptical_arc_argument = nonnegative_number + comma + nonnegative_number \ + comma + number + comma + flag + comma + flag + comma + coordinate_pair elliptical_arc = oneOf('A a') + elliptical_arc_argument \ + ZeroOrMore(comma + elliptical_arc_argument) drawto_command = closepath \ | lineto \ | horizontal_lineto \ | vertical_lineto \ | curveto \ | smooth_curveto \ | quadratic_bezier_curveto \ | smooth_quadratic_bezier_curveto \ | elliptical_arc return OneOrMore(moveto + ZeroOrMore(drawto_command)) pathdata_parser = build_pathdata_parser() is_valid_pathdata = partial(has_valid_syntax, parser=pathdata_parser) def build_clock_val_parser(): digit2 = Word(nums, exact=2) timecount = integer_constant fraction = integer_constant seconds = digit2 minutes = digit2 hours = integer_constant metric = oneOf("h min s ms") timecount_val = timecount + Optional("." + fraction) + Optional(metric) partial_clock_val = minutes + ":" + seconds + Optional("." + fraction) full_clock_val = hours + ":" + minutes + ":" + seconds + Optional("." + fraction) return full_clock_val | partial_clock_val | timecount_val def build_wall_clock_val_parser(): # http://www.w3.org/TR/2005/REC-SMIL2-20050107/smil-timing.html#Timing-WallclockSyncValueSyntax digit2 = Word(nums, exact=2) fraction = integer_constant seconds = digit2 minutes = digit2 hours24 = digit2 day = digit2 month = digit2 year = Word(nums, exact=4) tzd = Literal("Z") | (sign + hours24 + ":" + minutes) hhmmss = hours24 + ":" + minutes + Optional(":" + seconds + Optional("." 
+ fraction)) walltime = hhmmss + Optional(tzd) date = year + "-" + month + "-" + day datetime = date + "T" + walltime return datetime | walltime | date def build_animation_timing_parser(): clock_val = build_clock_val_parser() wallclock_value = build_wall_clock_val_parser() event_ref = oneOf(event_names) # TODO: check id-value definition: is a leading '#' really valid? id_value = Optional("#") + Word(alphanums + "-_") opt_clock_val = Optional(sign + clock_val) wallclock_sync_value = Literal("wallclock(") + wallclock_value + ")" accesskey_value = Literal("accessKey(") + Word(alphas, exact=1) + ")" + opt_clock_val repeat_value = Optional(id_value + ".") + Literal("repeat(") + integer_constant + ")" + opt_clock_val event_value = Optional(id_value + ".") + event_ref + opt_clock_val syncbase_value = id_value + "." + oneOf("begin end") + opt_clock_val offset_value = Optional(sign) + clock_val begin_value = offset_value | syncbase_value | event_value | repeat_value \ | accesskey_value | wallclock_sync_value | Literal("indefinite") return begin_value + ZeroOrMore(semicolon + begin_value) animation_timing_parser = build_animation_timing_parser() is_valid_animation_timing = partial(has_valid_syntax, parser=animation_timing_parser)
renegelinas/mi-instrument
refs/heads/master
mi/dataset/driver/ctdpf_ckl/wfp/test/test_ctdpf_ckl_wfp_telemetered_driver.py
5
from mi.core.log import get_logger import unittest import os from mi.dataset.driver.wc_wm.cspp.wc_wm_cspp_telemetered_driver import parse from mi.dataset.driver.ctdpf_ckl.wfp.resource import RESOURCE_PATH from mi.dataset.dataset_driver import ParticleDataHandler __author__ = 'mworden' log = get_logger() class DriverTest(unittest.TestCase): def test_one(self): source_file_path = os.path.join(RESOURCE_PATH, 'C0000034.dat') particle_data_handler = ParticleDataHandler() particle_data_handler = parse(None, source_file_path, particle_data_handler) log.debug("SAMPLES: %s", particle_data_handler._samples) log.debug("FAILURE: %s", particle_data_handler._failure) if __name__ == '__main__': test = DriverTest('test_one') test.test_one()
isrohutamahopetechnik/MissionPlanner
refs/heads/master
Lib/lib2to3/fixes/fix_set_literal.py
61
""" Optional fixer to transform set() calls to set literals. """ # Author: Benjamin Peterson from lib2to3 import fixer_base, pytree from lib2to3.fixer_util import token, syms class FixSetLiteral(fixer_base.BaseFix): BM_compatible = True explicit = True PATTERN = """power< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > """ def transform(self, node, results): single = results.get("single") if single: # Make a fake listmaker fake = pytree.Node(syms.listmaker, [single.clone()]) single.replace(fake) items = fake else: items = results["items"] # Build the contents of the literal literal = [pytree.Leaf(token.LBRACE, u"{")] literal.extend(n.clone() for n in items.children) literal.append(pytree.Leaf(token.RBRACE, u"}")) # Set the prefix of the right brace to that of the ')' or ']' literal[-1].prefix = items.next_sibling.prefix maker = pytree.Node(syms.dictsetmaker, literal) maker.prefix = node.prefix # If the original was a one tuple, we need to remove the extra comma. if len(maker.children) == 4: n = maker.children[2] n.remove() maker.children[-1].prefix = n.prefix # Finally, replace the set call with our shiny new literal. return maker
garncarz/dns-server
refs/heads/master
dns/tests/test_redirection.py
1
from django.test import TestCase from dns import models class RedirectionTestCase(TestCase): def test_redirection(self): url = 'https://vietcong1.eu' models.Redirection.objects.create(abbr='vc', target=url) resp = self.client.get('/links/vc') self.assertRedirects(resp, url, fetch_redirect_response=False)
suitai/MyTweetApp
refs/heads/master
bin/download.py
1
#!/usr/bin/env python # -*- coding:utf-8 -*- import os import sys import getopt import urllib2 def download(url, out_dir, overwrite=False): file_name = url.split('/')[-1] out_dir = os.path.expanduser(out_dir) out_dir = os.path.expandvars(out_dir) out_file = "%s/%s" % (out_dir, file_name) if not os.path.isdir(out_dir): raise DownloadError("Cannot find directory \"%s\"." % out_dir) if os.path.exists(out_file) and not overwrite: raise DownloadError("The file \"%s\" is present." % out_file) urlopen = urllib2.urlopen(url) info = urlopen.info() file_size = int(info.getheaders("Content-Length")[0]) print "Downloading: %s Bytes: %s" % (file_name, file_size) file_size_dl = 0 block_sz = 4096 with open(out_file, 'wb') as stream: while file_size: buffer = urlopen.read(block_sz) if not buffer: break file_size_dl += len(buffer) stream.write(buffer) status = r"%10d/%10d [%3.2f%%]" % (file_size_dl, file_size, file_size_dl * 100. / file_size) status = status + chr(8)*(len(status)+1) print status, class DownloadError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) def main(): out_dir = "." overwrite = False try: optlist, args = getopt.getopt(sys.argv[1:], 'o:', ["overwrite"]) except getopt.GetoptError as detail: sys.exit("GetoptError: %s" % detail) for opt, arg in optlist: if opt == "-o": out_dir = arg elif opt == "--overwrite": overwrite = True else: assert False, "unhandled option" try: download(args[0], out_dir, overwrite=overwrite) except DownloadError as detail: sys.exit("Error: %s" % detail) ### Execute if __name__ == "__main__": main()
ivoflipse/devide
refs/heads/master
snippets/changeObjectsLUT.py
7
# snippet to change the LookupTables (colourmaps) of the selected objects # this should be run in the introspection context of a slice3dVWR # $Id$ import os import tempfile import vtk className = obj.__class__.__name__ if className == 'slice3dVWR': # find all polydata objects so = obj._tdObjects._getSelectedObjects() polyDatas = [pd for pd in so if hasattr(pd, 'GetClassName') and pd.GetClassName() == 'vtkPolyData'] # now find their Mappers objectsDict = obj._tdObjects._tdObjectsDict actors = [objectsDict[pd]['vtkActor'] for pd in polyDatas] mappers = [a.GetMapper() for a in actors] for mapper in mappers: lut = mapper.GetLookupTable() lut.SetScaleToLog10() #lut.SetScaleToLinear() srange = mapper.GetInput().GetScalarRange() lut.SetTableRange(srange) lut.SetSaturationRange(1.0,1.0) lut.SetValueRange(1.0, 1.0) lut.SetHueRange(0.1, 1.0) lut.Build() else: print "This snippet must be run from a slice3dVWR introspection window."
g-vidal/upm
refs/heads/master
examples/python/aeotecdsb09104.py
6
#!/usr/bin/env python # Author: Jon Trulson <jtrulson@ics.com> # Copyright (c) 2016 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import print_function import time, sys, signal, atexit from upm import pyupm_ozw as sensorObj def main(): # This function lets you run code on exit def exitHandler(): print("Exiting") sys.exit(0) # Register exit handlers atexit.register(exitHandler) defaultDev = "/dev/ttyACM0" if (len(sys.argv) > 1): defaultDev = sys.argv[1] print("Using device", defaultDev) # Instantiate an Aeotec DSB09104 instance, on device node 12. You # will almost certainly need to change this to reflect your own # network. Use the ozwdump example to see what nodes are available. sensor = sensorObj.AeotecDSB09104(12) # The first thing to do is create options, then lock them when done. sensor.optionsCreate() sensor.optionsLock() # Next, initialize it. 
print("Initializing, this may take awhile depending on your ZWave network") sensor.init(defaultDev) print("Initialization complete") print("Querying data...") while (True): sensor.update() print("Watts, Channel 1: %0.03f W" % sensor.getWattsC1()) print("Watts, Channel 2: %0.03f W" % sensor.getWattsC2()) print("Watts, Channel 3: %0.03f W" % sensor.getWattsC3()) print("Energy, Channel 1: %0.03f kWh" % sensor.getEnergyC1()) print("Energy, Channel 2: %0.03f kWh" % sensor.getEnergyC2()) print("Energy, Channel 3: %0.03f kWh" % sensor.getEnergyC3()) print("Battery Level: %d\n" % sensor.getBatteryLevel()) time.sleep(3) if __name__ == '__main__': main()
phoebusliang/parallel-lettuce
refs/heads/master
tests/integration/lib/Django-1.2.5/tests/regressiontests/localflavor/us/models.py
47
from django.db import models from django.contrib.localflavor.us.models import USStateField # When creating models you need to remember to add a app_label as # 'localflavor', so your model can be found class USPlace(models.Model): state = USStateField(blank=True) state_req = USStateField() state_default = USStateField(default="CA", blank=True) name = models.CharField(max_length=20) class Meta: app_label = 'localflavor'
marckuz/django
refs/heads/master
tests/datetimes/__init__.py
12133432
anaran/olympia
refs/heads/master
scripts/__init__.py
12133432
manassolanki/erpnext
refs/heads/develop
erpnext/hr/doctype/driving_license_category/__init__.py
12133432
angelapper/odoo
refs/heads/9.0
addons/product_extended/product_extended.py
24
# Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.osv import fields from openerp.osv import osv class product_template(osv.osv): _name = 'product.template' _inherit = 'product.template' def compute_price(self, cr, uid, product_ids, template_ids=False, recursive=False, test=False, real_time_accounting = False, context=None): ''' Will return test dict when the test = False Multiple ids at once? testdict is used to inform the user about the changes to be made ''' testdict = {} if product_ids: ids = product_ids model = 'product.product' else: ids = template_ids or [] model = 'product.template' for prod_id in ids: bom_obj = self.pool.get('mrp.bom') if model == 'product.product': bom_id = bom_obj._bom_find(cr, uid, product_id=prod_id, context=context) else: bom_id = bom_obj._bom_find(cr, uid, product_tmpl_id=prod_id, context=context) if bom_id: # In recursive mode, it will first compute the prices of child boms if recursive: #Search the products that are components of this bom of prod_id bom = bom_obj.browse(cr, uid, bom_id, context=context) #Call compute_price on these subproducts prod_set = set([x.product_id.id for x in bom.bom_line_ids]) res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context) if test: testdict.update(res) #Use calc price to calculate and put the price on the product of the BoM if necessary price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context) if test: testdict.update({prod_id : price}) if test: return testdict else: return True def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None): if context is None: context={} price = 0 uom_obj = self.pool.get("product.uom") tmpl_obj = self.pool.get('product.template') for sbom in bom.bom_line_ids: my_qty = sbom.product_qty / sbom.product_efficiency if 
not sbom.attribute_value_ids: # No attribute_value_ids means the bom line is not variant specific price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty if bom.routing_id: for wline in bom.routing_id.workcenter_lines: wc = wline.workcenter_id cycle = wline.cycle_nbr hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0) price += wc.costs_cycle * cycle + wc.costs_hour * hour price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id) #Convert on product UoM quantities if price > 0: price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id) product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context) if not test: if (product.valuation != "real_time" or not real_time_accounting): tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context) else: #Call wizard function here wizard_obj = self.pool.get("stock.change.standard.price") ctx = context.copy() ctx.update({'active_id': product.id, 'active_model': 'product.template'}) wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx) wizard_obj.change_price(cr, uid, [wiz_id], context=ctx) return price class product_bom(osv.osv): _inherit = 'mrp.bom' def _get_variant_count(self, cr, uid, ids, field_name, arg, context=None): res = {} for bom in self.browse(cr, uid, ids, context=context): res[bom.id] = bom.product_tmpl_id.product_variant_count return res _columns = { 'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False), 'get_variant_count': fields.function(_get_variant_count, type='integer', string='Number of variant for the product'), } product_bom()
mengxn/tensorflow
refs/heads/master
tensorflow/python/estimator/model_fn_test.py
42
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_fn.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook


class _FakeHook(session_run_hook.SessionRunHook):
  """Fake implementation of `SessionRunHook`."""


class _InvalidHook(object):
  """Invalid hook (not a subclass of `SessionRunHook`)."""


class _InvalidScaffold(object):
  """Invalid scaffold (not a subclass of `Scaffold`)."""


class EstimatorSpecTrainTest(test.TestCase):
  """Tests EstimatorSpec in train mode."""

  def testRequiredArgumentsSet(self):
    """Tests that no errors are raised when all required arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          loss=constant_op.constant(1.),
          train_op=control_flow_ops.no_op())

  def testAllArgumentsSet(self):
    """Tests that no errors are raised when all arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      predictions = {'loss': loss}
      classes = constant_op.constant('hello')
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=loss,
          train_op=control_flow_ops.no_op(),
          eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},
          export_outputs={
              'head_name': export_output.ClassificationOutput(classes=classes)
          },
          training_chief_hooks=[_FakeHook()],
          training_hooks=[_FakeHook()],
          scaffold=monitored_session.Scaffold())

  def testLossNumber(self):
    """Tests that error is raised when loss is a number (not Tensor)."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=1.,
            train_op=control_flow_ops.no_op())

  def testLoss1DTensor(self):
    """Tests that no errors are raised when loss is 1D tensor."""
    with ops.Graph().as_default(), self.test_session():
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          loss=constant_op.constant([1.]),
          train_op=control_flow_ops.no_op())

  def testLossMissing(self):
    """Tests that an error is raised when loss is omitted in TRAIN mode."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(ValueError, 'Missing loss'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN, train_op=control_flow_ops.no_op())

  def testLossNotScalar(self):
    """Tests that an error is raised when loss has more than one element."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant([1., 2.]),
            train_op=control_flow_ops.no_op())

  def testLossSparseTensor(self):
    """Tests that an error is raised when loss is a SparseTensor."""
    with ops.Graph().as_default(), self.test_session():
      loss = sparse_tensor.SparseTensor(
          indices=[[0]],
          values=[0.],
          dense_shape=[1])
      with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=loss,
            train_op=control_flow_ops.no_op())

  def testLossFromDifferentGraph(self):
    """Tests that an error is raised when loss comes from another graph."""
    with ops.Graph().as_default():
      loss = constant_op.constant(1.)
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          ValueError, 'must be from the default graph'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=loss,
            train_op=control_flow_ops.no_op())

  def testTrainOpMissing(self):
    """Tests that an error is raised when train_op is omitted in TRAIN mode."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.))

  def testTrainOpNotOperationAndTensor(self):
    """Tests that an error is raised for a non-Operation/Tensor train_op."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(TypeError,
                                   'train_op must be Operation or Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.),
            train_op='Not an Operation or Tensor')

  def testTrainOpFromDifferentGraph(self):
    """Tests that an error is raised when train_op is from another graph."""
    with ops.Graph().as_default():
      train_op = control_flow_ops.no_op()
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          ValueError, 'must be from the default graph'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.),
            train_op=train_op)

  def testTrainingChiefHookInvalid(self):
    """Tests that an error is raised for a non-SessionRunHook chief hook."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          TypeError, 'All hooks must be SessionRunHook instances'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.),
            train_op=control_flow_ops.no_op(),
            training_chief_hooks=[_InvalidHook()])

  def testTrainingHookInvalid(self):
    """Tests that an error is raised for a non-SessionRunHook training hook."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          TypeError, 'All hooks must be SessionRunHook instances'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.),
            train_op=control_flow_ops.no_op(),
            training_hooks=[_InvalidHook()])

  def testScaffoldInvalid(self):
    """Tests that an error is raised for a non-Scaffold scaffold argument."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          TypeError, r'scaffold must be tf\.train\.Scaffold'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.TRAIN,
            loss=constant_op.constant(1.),
            train_op=control_flow_ops.no_op(),
            scaffold=_InvalidScaffold())

  def testReturnDefaultScaffold(self):
    """Tests that a default Scaffold is created when none is supplied."""
    with ops.Graph().as_default(), self.test_session():
      estimator_spec = model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          loss=constant_op.constant(1.),
          train_op=control_flow_ops.no_op())
      self.assertIsNotNone(estimator_spec.scaffold)


class EstimatorSpecEvalTest(test.TestCase):
  """Tests EstimatorSpec in eval mode."""

  def testRequiredArgumentsSet(self):
    """Tests that no errors are raised when all required arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions={'loss': loss},
          loss=loss)

  def testAllArgumentsSet(self):
    """Tests that no errors are raised when all arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      predictions = {'loss': loss}
      classes = constant_op.constant('hello')
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions=predictions,
          loss=loss,
          train_op=control_flow_ops.no_op(),
          eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},
          export_outputs={
              'head_name': export_output.ClassificationOutput(classes=classes)
          },
          training_chief_hooks=[_FakeHook()],
          training_hooks=[_FakeHook()],
          scaffold=monitored_session.Scaffold())

  def testTupleMetric(self):
    """Tests that no errors are raised when a metric is tuple-valued."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          loss=loss,
          eval_metric_ops={
              'some_metric': ((loss, loss, (constant_op.constant(2), loss)),
                              control_flow_ops.no_op())})

  def testLoss1DTensor(self):
    """Tests that no errors are raised when loss is 1D tensor."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant([1.])
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions={'loss': loss},
          loss=loss)

  def testLossNumber(self):
    """Tests that error is raised when loss is a number (not Tensor)."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': constant_op.constant(1.)},
            loss=1.)

  def testLossMissing(self):
    """Tests that an error is raised when loss is omitted in EVAL mode."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(ValueError, 'Missing loss'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': constant_op.constant(1.)})

  def testLossNotScalar(self):
    """Tests that an error is raised when loss has more than one element."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant([1., 2.])
      with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss)

  def testLossSparseTensor(self):
    """Tests that an error is raised when loss is a SparseTensor."""
    with ops.Graph().as_default(), self.test_session():
      loss = sparse_tensor.SparseTensor(
          indices=[[0]],
          values=[0.],
          dense_shape=[1])
      with self.assertRaisesRegexp(
          TypeError, 'loss must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'prediction': constant_op.constant(1.)},
            loss=loss)

  def testLossFromDifferentGraph(self):
    """Tests that an error is raised when loss comes from another graph."""
    with ops.Graph().as_default():
      loss = constant_op.constant(1.)
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          ValueError, 'must be from the default graph'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'prediction': constant_op.constant(1.)},
            loss=loss)

  def testPredictionsMissingIsOkay(self):
    """Tests that predictions may be omitted in EVAL mode."""
    with ops.Graph().as_default(), self.test_session():
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          loss=constant_op.constant(1.))

  def testPredictionsTensor(self):
    """Tests that no error is raised when predictions is Tensor (not dict)."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions=loss,
          loss=loss)

  def testPredictionsNumber(self):
    """Tests that an error is raised for a non-Tensor prediction value."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          TypeError, r'predictions\[number\] must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'number': 1.},
            loss=constant_op.constant(1.))

  def testPredictionsSparseTensor(self):
    """Tests that an error is raised for a SparseTensor prediction value."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {
          'sparse': sparse_tensor.SparseTensor(
              indices=[[0]],
              values=[0.],
              dense_shape=[1])}
      with self.assertRaisesRegexp(
          TypeError, r'predictions\[sparse\] must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=constant_op.constant(1.))

  def testPredictionsFromDifferentGraph(self):
    """Tests that an error is raised when predictions are from another graph."""
    with ops.Graph().as_default():
      predictions = {'loss': constant_op.constant(1.)}
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          ValueError, 'must be from the default graph'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=constant_op.constant(1.))

  def testEvalMetricOpsNoDict(self):
    """Tests that an error is raised when eval_metric_ops is not a dict."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      with self.assertRaisesRegexp(
          TypeError, 'eval_metric_ops must be a dict'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss,
            eval_metric_ops=loss)

  def testEvalMetricOpsNoTuple(self):
    """Tests that an error is raised when a metric value is not a tuple."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      with self.assertRaisesRegexp(
          TypeError,
          (r'Values of eval_metric_ops must be \(metric_value, update_op\) '
           'tuples')):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss,
            eval_metric_ops={'loss': loss})

  def testEvalMetricOpsNoTensorOrOperation(self):
    """Tests that an error is raised for a non-Tensor/Operation metric item."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      with self.assertRaisesRegexp(TypeError, 'must be Operation or Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss,
            eval_metric_ops={'loss': ('NonTensor', loss)})

  def testEvalMetricNestedNoTensorOrOperation(self):
    """Tests that nested non-Tensor metric values are also rejected."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      with self.assertRaisesRegexp(TypeError, 'must be Operation or Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss,
            eval_metric_ops={'loss': ((('NonTensor',),),
                                      control_flow_ops.no_op())})

  def testEvalMetricOpsFromDifferentGraph(self):
    """Tests that an error is raised when metric ops are from another graph."""
    with ops.Graph().as_default():
      eval_metric_ops = {
          'loss': (control_flow_ops.no_op(), constant_op.constant(1.))}
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      with self.assertRaisesRegexp(
          ValueError, 'must be from the default graph'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions={'loss': loss},
            loss=loss,
            eval_metric_ops=eval_metric_ops)


class EstimatorSpecInferTest(test.TestCase):
  """Tests EstimatorSpec in infer mode."""

  def testRequiredArgumentsSet(self):
    """Tests that no errors are raised when all required arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions={'loss': constant_op.constant(1.)})

  def testAllArgumentsSet(self):
    """Tests that no errors are raised when all arguments are set."""
    with ops.Graph().as_default(), self.test_session():
      loss = constant_op.constant(1.)
      predictions = {'loss': loss}
      classes = constant_op.constant('hello')
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          loss=loss,
          train_op=control_flow_ops.no_op(),
          eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},
          export_outputs={
              'head_name': export_output.ClassificationOutput(classes=classes)
          },
          training_chief_hooks=[_FakeHook()],
          training_hooks=[_FakeHook()],
          scaffold=monitored_session.Scaffold())

  def testPredictionsMissing(self):
    """Tests that an error is raised when predictions are omitted."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(ValueError, 'Missing predictions'):
        model_fn.EstimatorSpec(mode=model_fn.ModeKeys.PREDICT)

  def testPredictionsTensor(self):
    """Tests that no error is raised when predictions is Tensor (not dict)."""
    with ops.Graph().as_default(), self.test_session():
      model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=constant_op.constant(1.))

  def testPredictionsNumber(self):
    """Tests that an error is raised for a non-Tensor prediction value."""
    with ops.Graph().as_default(), self.test_session():
      with self.assertRaisesRegexp(
          TypeError, r'predictions\[number\] must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT, predictions={'number': 1.})

  def testPredictionsSparseTensor(self):
    """Tests that an error is raised for a SparseTensor prediction value."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {
          'sparse': sparse_tensor.SparseTensor(
              indices=[[0]],
              values=[0.],
              dense_shape=[1])}
      with self.assertRaisesRegexp(
          TypeError, r'predictions\[sparse\] must be Tensor'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT, predictions=predictions)

  def testExportOutputsNoDict(self):
    """Tests that an error is raised when export_outputs is not a dict."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {'loss': constant_op.constant(1.)}
      classes = constant_op.constant('hello')
      with self.assertRaisesRegexp(
          TypeError, 'export_outputs must be dict'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs=export_output.ClassificationOutput(classes=classes))

  def testExportOutputsValueNotExportOutput(self):
    """Tests that export_outputs values must be ExportOutput instances."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {'loss': constant_op.constant(1.)}
      with self.assertRaisesRegexp(
          TypeError,
          r"Values in export_outputs must be ExportOutput objects. "
          r"Given: {'head_name': {'loss': <tf.Tensor 'Const:0' shape=\(\) "
          r"dtype=float32>}}"):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'head_name': predictions})

  def testExportOutputsSingleheadMissingDefault(self):
    """Tests that a lone head is promoted to the default serving signature."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {'loss': constant_op.constant(1.)}
      output_1 = constant_op.constant([1.])
      regression_output = export_output.RegressionOutput(value=output_1)
      export_outputs = {
          'head-1': regression_output,
      }
      estimator_spec = model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs=export_outputs)
      expected_export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              regression_output,
          'head-1': regression_output,
      }
      self.assertEqual(expected_export_outputs, estimator_spec.export_outputs)

  def testExportOutputsMultiheadWithDefault(self):
    """Tests that multiple heads pass when a default signature is present."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {'loss': constant_op.constant(1.)}
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(['2'])
      output_3 = constant_op.constant(['3'])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.RegressionOutput(value=output_1),
          'head-2': export_output.ClassificationOutput(classes=output_2),
          'head-3': export_output.PredictOutput(outputs={
              'some_output_3': output_3
          })}
      estimator_spec = model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs=export_outputs)
      self.assertEqual(export_outputs, estimator_spec.export_outputs)

  def testExportOutputsMultiheadMissingDefault(self):
    """Tests that multiple heads without a default signature are rejected."""
    with ops.Graph().as_default(), self.test_session():
      predictions = {'loss': constant_op.constant(1.)}
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(['2'])
      output_3 = constant_op.constant(['3'])
      export_outputs = {
          'head-1': export_output.RegressionOutput(value=output_1),
          'head-2': export_output.ClassificationOutput(classes=output_2),
          'head-3': export_output.PredictOutput(outputs={
              'some_output_3': output_3
          })}
      with self.assertRaisesRegexp(
          ValueError,
          'Multiple export_outputs were provided, but none of them is '
          'specified as the default. Do this by naming one of them with '
          'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.'):
        model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs=export_outputs)


if __name__ == '__main__':
  test.main()
psi4/DatenQM
refs/heads/master
qcfractal/alembic/versions/30fd8253d87f_kvstore_compress.py
1
"""Compression of KVStore Revision ID: 30fd8253d87f Revises: 1604623c481a Create Date: 2020-08-30 10:11:57.574292 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = "30fd8253d87f" down_revision = "1604623c481a" branch_labels = None depends_on = None def upgrade(): # Appears there is an alembic issue with autogenerating enums: # https://github.com/sqlalchemy/alembic/issues/278 compression_enum = postgresql.ENUM("none", "gzip", "bzip2", "lzma", name="compressionenum") compression_enum.create(op.get_bind()) # ### commands auto generated by Alembic - please adjust! ### op.add_column("kv_store", sa.Column("compression", compression_enum, nullable=True)) op.add_column("kv_store", sa.Column("compression_level", sa.Integer(), nullable=True)) op.add_column("kv_store", sa.Column("data", sa.LargeBinary(), nullable=True)) op.alter_column("kv_store", "value", existing_type=postgresql.JSON(astext_type=sa.Text()), nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column("kv_store", "value", existing_type=postgresql.JSON(astext_type=sa.Text()), nullable=False) op.drop_column("kv_store", "data") op.drop_column("kv_store", "compression_level") op.drop_column("kv_store", "compression") op.execute("DROP TYPE compressionenum") # ### end Alembic commands ###
pariahsoft/Dennis
refs/heads/master
commands/help.py
1
########################################
## Adventure Bot "Dennis"             ##
## commands/help.py                   ##
## Copyright 2012-2013 PariahSoft LLC ##
########################################

## **********
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
## **********

################
# Help Command #
################

from helpers import *

def C_HELP(S, DB, sender, args):
	"""Send the command list, or usage help for one command, to sender.

	With no args, sends a comma-separated list of all command names.
	With args, looks up the (case-insensitive, possibly multi-word) command
	name and sends its "usage: description" line; an unknown name falls back
	to showing help for the "help" command itself.
	"""
	if len(args) == 0: # List commands.
		# Join once instead of concatenating in a loop.
		send(S, sender, "Commands: " + ", ".join(entry[0] for entry in help_entries))
		return

	# Look for this command (multi-word names like "room SET" are supported).
	query = " ".join(args).lower()
	for name, usage, description in help_entries:
		if name.lower() == query:
			# Return command help.
			send(S, sender, "{0}: {1}".format(usage, description))
			return

	# Unknown command: show help for "help" as a hint.
	return C_HELP(S, DB, sender, ["help"])

### Help Entries ###
# Each entry is (name, usage, description); C_HELP reads all three fields.
help_entries = [
	["help", "help [command]", "List commands or give usage for specified command."],
	["join", "join <pass>", "Authenticate and join the game. Creates account with pass on first join."],
	["quit", "quit", "Leave the game."],
	["say", "say <text>", "Say something in the current room's chat."],
	["shout", "shout <text>", "Say something in the entire world's chat."],
	["roll", "roll <min> <max>", "Roll a random number between min and max."],
	["me", "me <text>", "Perform an action in the current room's chat."],
	["look", "look [item|player]", "Look at current room, or specified item or player."],
	["go", "go [exit]", "List exits in current room or use specified exit."],
	["list", "list [name]", "List all rooms, or return the ID of the named room."],
	["self", "self", "Look at yourself."],
	["self SET", "self SET name|desc <text>", "Set your own name or description."],
	["mkroom", "mkroom <name>", "Create a new named room and return its ID."],
	["room", "room", "Look at the current room."],
	["room SET", "room SET name|desc|lock|owner <text>", "Set current room's name, description, lock flag, or owner."],
	["room UNLINK", "room UNLINK", "Remove all exits to the current room. Must be room owner."],
	["exit", "exit", "List the current room's exits."],
	["exit SET", "exit SET <id> <name>", "Create named exit in current room to specified room ID."],
	["exit DEL", "exit DEL <name>", "Delete the named exit from current room."],
	["mkitem", "mkitem <name>", "Create a new named item and return its ID."],
	["item", "item", "List items in current room."],
	["item SET", "item SET <id> name|desc <text>", "Set the item's name or description."],
	["item DEL", "item DEL <id>", "Delete the named item from current room."],
	["tp", "tp <id>", "Warp to the specified room by ID."],
	["version", "version", "Print bot info and version."],
]
AlexRobson/scikit-learn
refs/heads/master
examples/linear_model/plot_iris_logistic.py
283
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Logistic Regression 3-class Classifier ========================================================= Show below is a logistic-regression classifiers decision boundaries on the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints are colored according to their labels. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. Y = iris.target h = .02 # step size in the mesh logreg = linear_model.LogisticRegression(C=1e5) # we create an instance of Neighbours Classifier and fit the data. logreg.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1, figsize=(4, 3)) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.show()
iglpdc/nipype
refs/heads/master
nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py
12
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from .....testing import assert_equal from ..brains import BRAINSEyeDetector def test_BRAINSEyeDetector_inputs(): input_map = dict(args=dict(argstr='%s', ), debugDir=dict(argstr='--debugDir %s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), terminal_output=dict(nohash=True, ), ) inputs = BRAINSEyeDetector.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSEyeDetector_outputs(): output_map = dict(outputVolume=dict(), ) outputs = BRAINSEyeDetector.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
thomazs/geraldo
refs/heads/master
site/newsite/site-geraldo/django/utils/text.py
15
import re from django.conf import settings from django.utils.encoding import force_unicode from django.utils.functional import allow_lazy from django.utils.translation import ugettext_lazy from htmlentitydefs import name2codepoint # Capitalizes the first letter of a string. capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:] capfirst = allow_lazy(capfirst, unicode) def wrap(text, width): """ A word-wrap function that preserves existing line breaks and most spaces in the text. Expects that existing line breaks are posix newlines. """ text = force_unicode(text) def _generator(): it = iter(text.split(' ')) word = it.next() yield word pos = len(word) - word.rfind('\n') - 1 for word in it: if "\n" in word: lines = word.split('\n') else: lines = (word,) pos += len(lines[0]) + 1 if pos > width: yield '\n' pos = len(lines[-1]) else: yield ' ' if len(lines) > 1: pos = len(lines[-1]) yield word return u''.join(_generator()) wrap = allow_lazy(wrap, unicode) def truncate_words(s, num): "Truncates a string after a certain number of words." s = force_unicode(s) length = int(num) words = s.split() if len(words) > length: words = words[:length] if not words[-1].endswith('...'): words.append('...') return u' '.join(words) truncate_words = allow_lazy(truncate_words, unicode) def truncate_html_words(s, num): """ Truncates html to a certain number of words (not counting tags and comments). Closes opened tags if they were correctly closed in the given html. 
""" s = force_unicode(s) length = int(num) if length <= 0: return u'' html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input') # Set up regular expressions re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U) re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>') # Count non-HTML words and keep note of open tags pos = 0 ellipsis_pos = 0 words = 0 open_tags = [] while words <= length: m = re_words.search(s, pos) if not m: # Checked through whole string break pos = m.end(0) if m.group(1): # It's an actual non-HTML word words += 1 if words == length: ellipsis_pos = pos continue # Check for tag tag = re_tag.match(m.group(0)) if not tag or ellipsis_pos: # Don't worry about non tags or tags after our truncate point continue closing_tag, tagname, self_closing = tag.groups() tagname = tagname.lower() # Element names are always case-insensitive if self_closing or tagname in html4_singlets: pass elif closing_tag: # Check for match in open tags list try: i = open_tags.index(tagname) except ValueError: pass else: # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags open_tags = open_tags[i+1:] else: # Add it to the start of the open tags list open_tags.insert(0, tagname) if words <= length: # Don't try to close tags if we don't need to truncate return s out = s[:ellipsis_pos] + ' ...' # Close any tags still open for tag in open_tags: out += '</%s>' % tag # Return string return out truncate_html_words = allow_lazy(truncate_html_words, unicode) def get_valid_filename(s): """ Returns the given string converted to a string that can be used for a clean filename. Specifically, leading and trailing spaces are removed; other spaces are converted to underscores; and all non-filename-safe characters are removed. 
>>> get_valid_filename("john's portrait in 2004.jpg") u'johns_portrait_in_2004.jpg' """ s = force_unicode(s).strip().replace(' ', '_') return re.sub(r'[^-A-Za-z0-9_.]', '', s) get_valid_filename = allow_lazy(get_valid_filename, unicode) def get_text_list(list_, last_word=ugettext_lazy(u'or')): """ >>> get_text_list(['a', 'b', 'c', 'd']) u'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') u'a, b and c' >>> get_text_list(['a', 'b'], 'and') u'a and b' >>> get_text_list(['a']) u'a' >>> get_text_list([]) u'' """ if len(list_) == 0: return u'' if len(list_) == 1: return force_unicode(list_[0]) return u'%s %s %s' % (', '.join([force_unicode(i) for i in list_][:-1]), force_unicode(last_word), force_unicode(list_[-1])) get_text_list = allow_lazy(get_text_list, unicode) def normalize_newlines(text): return force_unicode(re.sub(r'\r\n|\r|\n', '\n', text)) normalize_newlines = allow_lazy(normalize_newlines, unicode) def recapitalize(text): "Recapitalizes text, placing caps after end-of-sentence punctuation." text = force_unicode(text).lower() capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])') text = capsRE.sub(lambda x: x.group(1).upper(), text) return text recapitalize = allow_lazy(recapitalize) def phone2numeric(phone): "Converts a phone number with letters into its numeric equivalent." letters = re.compile(r'[A-PR-Y]', re.I) char2number = lambda m: {'a': '2', 'c': '2', 'b': '2', 'e': '3', 'd': '3', 'g': '4', 'f': '3', 'i': '4', 'h': '4', 'k': '5', 'j': '5', 'm': '6', 'l': '5', 'o': '6', 'n': '6', 'p': '7', 's': '7', 'r': '7', 'u': '8', 't': '8', 'w': '9', 'v': '8', 'y': '9', 'x': '9'}.get(m.group(0).lower()) return letters.sub(char2number, phone) phone2numeric = allow_lazy(phone2numeric) # From http://www.xhaus.com/alan/python/httpcomp.html#gzip # Used with permission. 
def compress_string(s):
    """Gzip-compresses the byte string ``s`` in memory and returns the result."""
    import cStringIO, gzip
    zbuf = cStringIO.StringIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()

# Matches any single non-ASCII character, for escaping in javascript_quote.
ustring_re = re.compile(u"([\u0080-\uffff])")

def javascript_quote(s, quote_double_quotes=False):
    """
    Escapes a string for safe embedding in a JavaScript string literal.
    Backslashes, control characters and single quotes are backslash-escaped;
    non-ASCII characters become \\uXXXX escapes.
    """
    def fix(match):
        return r"\u%04x" % ord(match.group(1))

    if type(s) == str:
        s = s.decode('utf-8')
    elif type(s) != unicode:
        raise TypeError, s
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # NOTE(review): double quotes become the HTML entity &quot;, not \",
        # presumably because callers embed the result in HTML attributes --
        # confirm against callers before changing.
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)

# Matches a double-quoted phrase, a single-quoted phrase (both allowing
# backslash escapes), or a run of non-whitespace.
smart_split_re = re.compile('("(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"|\'(?:[^\'\\\\]*(?:\\\\.[^\'\\\\]*)*)\'|[^\\s]+)')
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks.

    >>> list(smart_split(r'This is "a person\'s" test.'))
    [u'This', u'is', u'"a person\\\'s"', u'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    [u'Another', u"'person's'", u'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    [u'A', u'""funky" style"', u'test.']
    """
    text = force_unicode(text)
    for bit in smart_split_re.finditer(text):
        bit = bit.group(0)
        if bit[0] == '"' and bit[-1] == '"':
            # Un-escape inner quotes/backslashes but keep the outer quotes.
            yield '"' + bit[1:-1].replace('\\"', '"').replace('\\\\', '\\') + '"'
        elif bit[0] == "'" and bit[-1] == "'":
            yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'"
        else:
            yield bit
smart_split = allow_lazy(smart_split, unicode)

def _replace_entity(match):
    """re.sub callback: resolves one HTML entity match to its character."""
    text = match.group(1)
    if text[0] == u'#':
        # Numeric reference: &#123; or hex &#x7b; / &#X7B;
        text = text[1:]
        try:
            if text[0] in u'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return unichr(c)
        except ValueError:
            # Not a valid number: leave the original text untouched.
            return match.group(0)
    else:
        # Named reference, e.g. &amp; -- unknown names are left untouched.
        try:
            return unichr(name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)

_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")

def unescape_entities(text):
    """Replaces HTML character/entity references in ``text`` with characters."""
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
dfm/arxiv2speech
refs/heads/master
arxiv2speech.py
1
#!/usr/bin/env python

from __future__ import print_function, absolute_import, unicode_literals

__all__ = ["run"]
__version__ = "0.0.4"
__author__ = "Dan Foreman-Mackey (danfm@nyu.edu)"
__copyright__ = "Copyright 2013 Daniel Foreman-Mackey"
__contributors__ = []

import os
import re
import json
import shutil
import subprocess
from multiprocessing import Pool

import feedparser
from html2text import html2text

# Regular expressions.
id_re = re.compile(r"http://arxiv.org/abs/(.*)")
title_re = re.compile(r"(.*) \(arXiv(?:.*?)\)$")
author_re = re.compile(r"<a href=\"(?:.*?)\">(.*?)</a>")


def run(basedir, url="http://export.arxiv.org/rss/astro-ph", clobber=False,
        quiet=False, limit=None):
    """
    Fetch the recent abstracts from an arXiv RSS feed and render each one
    to audio files under ``basedir`` (one subdirectory per abstract).

    :param basedir: output directory; created if missing.
    :param url:     arXiv RSS feed to fetch.
    :param clobber: if True, an existing ``basedir`` is removed and recreated.
    :param quiet:   suppress progress messages.
    :param limit:   optional cap on the number of abstracts processed.
    """
    # Make the base directory. Only OSError (e.g. "already exists") is
    # handled here; a bare except would also have swallowed KeyboardInterrupt
    # and could rmtree after unrelated failures.
    try:
        os.makedirs(basedir)
    except OSError:
        if not clobber:
            raise
        shutil.rmtree(basedir)
        os.makedirs(basedir)

    # Fetch the abstracts.
    if not quiet:
        print("Fetching recent abstracts from: {0}".format(url))
    abstracts = get_recent(url)
    if not quiet:
        print(" ... Found {0} abstracts.".format(len(abstracts)))

    if limit is not None:
        # Honor the quiet flag here too (the original printed unconditionally).
        if not quiet:
            print("Limiting to {0} total.".format(limit))
        abstracts = abstracts[:int(limit)]

    # Render the audio in parallel; text-to-speech is the slow part.
    if not quiet:
        print("Saving audio files (slowly) in: {0}".format(basedir))
    p = Pool()
    p.map(_run_one, zip([basedir] * len(abstracts), abstracts))
    if not quiet:
        print(" ... Done.")


def _run_one(args):
    """Pool worker: render one abstract (dict) into basedir/<arxiv id>/."""
    basedir, abstract = args

    # Create the directory for the audio files.
    basedir = os.path.join(basedir, abstract["id"])
    os.makedirs(basedir)

    # Save the metadata. Use a context manager so the handle is closed
    # promptly (the original leaked the open file object).
    with open(os.path.join(basedir, "info.json"), "w") as fp:
        json.dump(abstract, fp, sort_keys=True, indent=4,
                  separators=(",", ": "))

    # Build the spoken byline: first author, plus "and N others" if needed.
    by = "\n\nBy: " + abstract["authors"][0]
    n_authors = len(abstract["authors"])
    if n_authors == 2:
        by += " and " + abstract["authors"][1]
    elif n_authors > 2:
        by += " and {0} others.".format(n_authors - 1)

    # Save the audio files. The asserts preserve the original failure mode
    # (AssertionError propagated through Pool.map).
    r = text2audio(abstract["title"] + by, os.path.join(basedir, "brief.m4a"))
    assert r == 0, "Couldn't save brief for: {0}".format(abstract["id"])
    r = text2audio(", ".join(abstract["authors"]),
                   os.path.join(basedir, "authors.m4a"))
    assert r == 0, "Couldn't save authors for: {0}".format(abstract["id"])
    r = text2audio(abstract["abstract"],
                   os.path.join(basedir, "abstract.m4a"))
    assert r == 0, "Couldn't save abstract for: {0}".format(abstract["id"])


def get_recent(rss_url):
    """
    Parse an arXiv RSS feed and return a list of dicts with keys
    ``id``, ``title``, ``authors`` (list of names) and ``abstract``.
    """
    d = feedparser.parse(rss_url)
    results = []
    for e in d.entries:
        results.append({
            "id": id_re.findall(e.id)[0],
            "title": title_re.findall(e.title)[0],
            "authors": author_re.findall(e.author),
            "abstract": html2text(e.summary),
        })
    return results


def text2audio(text, filename):
    """
    Render ``text`` to an audio file with the macOS ``say`` command.
    Returns the subprocess exit code (0 on success).
    """
    p = subprocess.Popen(["say", "-o", filename], stdin=subprocess.PIPE)
    # Encode explicitly: with unicode_literals the text is unicode, and the
    # pipe expects bytes (required on Python 3, harmless on Python 2).
    p.communicate(text.encode("utf-8"))
    code = p.wait()
    return code
drrelyea/SPGL1_python_port
refs/heads/master
spgl1/__init__.py
1
from .spgl1 import * from .lsqr import lsqr try: from .version import version as __version__ except ImportError: __version__ = '0.0.0'
mcauser/micropython
refs/heads/master
tests/extmod/ure_sub_unmatched.py
15
# test re.sub with unmatched groups, behaviour changed in CPython 3.5 try: import ure as re except ImportError: try: import re except ImportError: print("SKIP") raise SystemExit try: re.sub except AttributeError: print("SKIP") raise SystemExit # first group matches, second optional group doesn't so is replaced with a blank print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))
saukrIppl/seahub
refs/heads/master
thirdpart/Django-1.8.10-py2.7.egg/django/conf/locale/bn/formats.py
575
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F, Y' TIME_FORMAT = 'g:i A' # DATETIME_FORMAT = YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'j M, Y' # SHORT_DATETIME_FORMAT = # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' # NUMBER_GROUPING =
chrisndodge/edx-platform
refs/heads/master
common/djangoapps/student/tests/test_certificates.py
22
"""Tests for display of certificates on the student dashboard. """ import unittest import ddt import mock from django.conf import settings from django.core.urlresolvers import reverse from mock import patch from django.test.utils import override_settings from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.tests.factories import UserFactory, CourseEnrollmentFactory from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error from certificates.api import get_certificate_url # pylint: disable=import-error from certificates.models import CertificateStatuses # pylint: disable=import-error from course_modes.models import CourseMode from student.models import LinkedInAddToProfileConfiguration # pylint: disable=no-member class CertificateDisplayTestBase(SharedModuleStoreTestCase): """Tests display of certificates on the student dashboard. 
""" USERNAME = "test_user" PASSWORD = "password" DOWNLOAD_URL = "http://www.example.com/certificate.pdf" @classmethod def setUpClass(cls): super(CertificateDisplayTestBase, cls).setUpClass() cls.course = CourseFactory() cls.course.certificates_display_behavior = "early_with_info" with cls.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, cls.course.id): cls.store.update_item(cls.course, cls.USERNAME) def setUp(self): super(CertificateDisplayTestBase, self).setUp() self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result, msg="Could not log in") def _check_linkedin_visibility(self, is_visible): """ Performs assertions on the Dashboard """ response = self.client.get(reverse('dashboard')) if is_visible: self.assertContains(response, u'Add Certificate to LinkedIn Profile') else: self.assertNotContains(response, u'Add Certificate to LinkedIn Profile') def _create_certificate(self, enrollment_mode): """Simulate that the user has a generated certificate. """ CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode) return GeneratedCertificateFactory( user=self.user, course_id=self.course.id, mode=enrollment_mode, download_url=self.DOWNLOAD_URL, status="downloadable", grade=0.98, ) def _check_can_download_certificate(self): """ Inspect the dashboard to see if a certificate can be downloaded. 
""" response = self.client.get(reverse('dashboard')) self.assertContains(response, u'Download Your ID Verified') self.assertContains(response, self.DOWNLOAD_URL) def _check_can_download_certificate_no_id(self): """ Inspects the dashboard to see if a certificate for a non verified course enrollment is present """ response = self.client.get(reverse('dashboard')) self.assertContains(response, u'Download') self.assertContains(response, u'(PDF)') self.assertContains(response, self.DOWNLOAD_URL) def _check_can_not_download_certificate(self): """ Make sure response does not have any of the download certificate buttons """ response = self.client.get(reverse('dashboard')) self.assertNotContains(response, u'View Test_Certificate') self.assertNotContains(response, u'Download Your Test_Certificate (PDF)') self.assertNotContains(response, u'Download Test_Certificate (PDF)') self.assertNotContains(response, self.DOWNLOAD_URL) @ddt.ddt @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class CertificateDisplayTest(CertificateDisplayTestBase): """ Tests of certificate display. 
""" @ddt.data('verified', 'professional') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False}) def test_display_verified_certificate(self, enrollment_mode): self._create_certificate(enrollment_mode) self._check_can_download_certificate() @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False}) def test_display_verified_certificate_no_id(self): """ Confirm that if we get a certificate with a no-id-professional mode we still can download our certificate """ self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE) self._check_can_download_certificate_no_id() @ddt.data('verified', 'honor', 'professional') def test_unverified_certificate_message(self, enrollment_mode): cert = self._create_certificate(enrollment_mode) cert.status = CertificateStatuses.unverified cert.save() response = self.client.get(reverse('dashboard')) self.assertContains( response, u'do not have a current verified identity with {platform_name}' .format(platform_name=settings.PLATFORM_NAME)) def test_post_to_linkedin_invisibility(self): """ Verifies that the post certificate to linked button does not appear by default (when config is not set) """ self._create_certificate('honor') # until we set up the configuration, the LinkedIn action # button should not be visible self._check_linkedin_visibility(False) def test_post_to_linkedin_visibility(self): """ Verifies that the post certificate to linked button appears as expected """ self._create_certificate('honor') config = LinkedInAddToProfileConfiguration( company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9', enabled=True ) config.save() # now we should see it self._check_linkedin_visibility(True) @mock.patch("openedx.core.djangoapps.theming.helpers.is_request_in_themed_site", mock.Mock(return_value=True)) def test_post_to_linkedin_site_specific(self): """ Verifies behavior for themed sites which disables the post to LinkedIn feature (for now) """ self._create_certificate('honor') config = 
LinkedInAddToProfileConfiguration( company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9', enabled=True ) config.save() # now we should not see it because we are in a themed site self._check_linkedin_visibility(False) @ddt.ddt @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class CertificateDisplayTestHtmlView(CertificateDisplayTestBase): """ Tests of webview certificate display """ @classmethod def setUpClass(cls): super(CertificateDisplayTestHtmlView, cls).setUpClass() cls.course.cert_html_view_enabled = True cls.course.save() cls.store.update_item(cls.course, cls.USERNAME) @ddt.data('verified', 'honor') @override_settings(CERT_NAME_SHORT='Test_Certificate') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_display_download_certificate_button(self, enrollment_mode): """ Tests if CERTIFICATES_HTML_VIEW is True and course has enabled web certificates via cert_html_view_enabled setting and no active certificate configuration available then any of the Download certificate button should not be visible. """ self._create_certificate(enrollment_mode) self._check_can_not_download_certificate() @ddt.ddt @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class CertificateDisplayTestLinkedHtmlView(CertificateDisplayTestBase): """ Tests of linked student certificates. 
""" @classmethod def setUpClass(cls): super(CertificateDisplayTestLinkedHtmlView, cls).setUpClass() cls.course.cert_html_view_enabled = True certificates = [ { 'id': 0, 'name': 'Test Name', 'description': 'Test Description', 'is_active': True, 'signatories': [], 'version': 1 } ] cls.course.certificates = {'certificates': certificates} cls.course.save() cls.store.update_item(cls.course, cls.USERNAME) @ddt.data('verified') @override_settings(CERT_NAME_SHORT='Test_Certificate') @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_linked_student_to_web_view_credential(self, enrollment_mode): cert = self._create_certificate(enrollment_mode) test_url = get_certificate_url(course_id=self.course.id, uuid=cert.verify_uuid) response = self.client.get(reverse('dashboard')) self.assertContains(response, u'View Test_Certificate') self.assertContains(response, test_url)
alexdglover/shill-isms
refs/heads/master
venv/lib/python2.7/site-packages/wheel/pkginfo.py
565
"""Tools for reading and writing PKG-INFO / METADATA without caring about the encoding.""" from email.parser import Parser try: unicode _PY3 = False except NameError: _PY3 = True if not _PY3: from email.generator import Generator def read_pkg_info_bytes(bytestr): return Parser().parsestr(bytestr) def read_pkg_info(path): with open(path, "r") as headers: message = Parser().parse(headers) return message def write_pkg_info(path, message): with open(path, 'w') as metadata: Generator(metadata, maxheaderlen=0).flatten(message) else: from email.generator import BytesGenerator def read_pkg_info_bytes(bytestr): headers = bytestr.decode(encoding="ascii", errors="surrogateescape") message = Parser().parsestr(headers) return message def read_pkg_info(path): with open(path, "r", encoding="ascii", errors="surrogateescape") as headers: message = Parser().parse(headers) return message def write_pkg_info(path, message): with open(path, "wb") as out: BytesGenerator(out, maxheaderlen=0).flatten(message)
xq262144/hue
refs/heads/master
desktop/core/src/desktop/management/commands/create_user_directories.py
8
# adapted from django-extensions (http://code.google.com/p/django-command-extensions/) # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from optparse import make_option from django.contrib.auth.models import User from django.core.management.base import CommandError, BaseCommand from django.utils.translation import ugettext_lazy as _ from desktop.models import Document2 LOG = logging.getLogger(__name__) class Command(BaseCommand): """ Creates home and Trash directories for users as needed, or specific user if username is provided If no arguments are provided, this command will loop through all users and create a home and Trash directory for the user if they don't exist. It will then move any orphan documents to the home directory. If --username is specified, it will only perform the operation for the specific user. 
""" help = _("Creates home and Trash directories for users as needed, or specific user if username is provided.") option_list = BaseCommand.option_list + ( make_option('--username', help=_("Username of user to create directories for."), action='store', default=None), ) def handle(self, *args, **options): users = User.objects.all() if options['username']: try: user = User.objects.get(username=options['username']) users = [user] except Exception, e: msg = 'Failed to get user with username %s: %s' % (options['username'], str(e)) self.stdout.write(msg) LOG.exception(msg) for user in users: try: msg = 'Attempting to create user directories for user: %s' % user.username self.stdout.write(msg) LOG.debug(msg) Document2.objects.create_user_directories(user) except Exception, e: msg = 'Failed to create user directories for user %s: %s' % (user.username, str(e)) self.stdout.write(msg) LOG.warn(msg)
grangier/django-11599
refs/heads/master
tests/modeltests/pagination/__init__.py
12133432
huangchuchuan/Spider
refs/heads/master
OopSpider/oop/oop/__init__.py
12133432
codeforamerica/template-maker
refs/heads/master
template_maker/builder/__init__.py
12133432
hrayr-artunyan/shuup
refs/heads/master
shuup_tests/admin/fixtures/__init__.py
12133432
ewitz/PhotoHaus
refs/heads/master
venv/lib/python2.7/site-packages/wtforms/ext/appengine/db.py
228
""" Form generation utilities for App Engine's ``db.Model`` class. The goal of ``model_form()`` is to provide a clean, explicit and predictable way to create forms based on ``db.Model`` classes. No malabarism or black magic should be necessary to generate a form for models, and to add custom non-model related fields: ``model_form()`` simply generates a form class that can be used as it is, or that can be extended directly or even be used to create other forms using ``model_form()``. Example usage: .. code-block:: python from google.appengine.ext import db from tipfy.ext.model.form import model_form # Define an example model and add a record. class Contact(db.Model): name = db.StringProperty(required=True) city = db.StringProperty() age = db.IntegerProperty(required=True) is_admin = db.BooleanProperty(default=False) new_entity = Contact(key_name='test', name='Test Name', age=17) new_entity.put() # Generate a form based on the model. ContactForm = model_form(Contact) # Get a form populated with entity data. entity = Contact.get_by_key_name('test') form = ContactForm(obj=entity) Properties from the model can be excluded from the generated form, or it can include just a set of properties. For example: .. code-block:: python # Generate a form based on the model, excluding 'city' and 'is_admin'. ContactForm = model_form(Contact, exclude=('city', 'is_admin')) # or... # Generate a form based on the model, only including 'name' and 'age'. ContactForm = model_form(Contact, only=('name', 'age')) The form can be generated setting field arguments: .. code-block:: python ContactForm = model_form(Contact, only=('name', 'age'), field_args={ 'name': { 'label': 'Full name', 'description': 'Your name', }, 'age': { 'label': 'Age', 'validators': [validators.NumberRange(min=14, max=99)], } }) The class returned by ``model_form()`` can be used as a base class for forms mixing non-model fields and/or other model forms. For example: .. 
code-block:: python # Generate a form based on the model. BaseContactForm = model_form(Contact) # Generate a form based on other model. ExtraContactForm = model_form(MyOtherModel) class ContactForm(BaseContactForm): # Add an extra, non-model related field. subscribe_to_news = f.BooleanField() # Add the other model form as a subform. extra = f.FormField(ExtraContactForm) The class returned by ``model_form()`` can also extend an existing form class: .. code-block:: python class BaseContactForm(Form): # Add an extra, non-model related field. subscribe_to_news = f.BooleanField() # Generate a form based on the model. ContactForm = model_form(Contact, base_class=BaseContactForm) """ from wtforms import Form, validators, widgets, fields as f from wtforms.compat import iteritems from wtforms.ext.appengine.fields import GeoPtPropertyField, ReferencePropertyField, StringListPropertyField def get_TextField(kwargs): """ Returns a ``TextField``, applying the ``db.StringProperty`` length limit of 500 bytes. """ kwargs['validators'].append(validators.length(max=500)) return f.TextField(**kwargs) def get_IntegerField(kwargs): """ Returns an ``IntegerField``, applying the ``db.IntegerProperty`` range limits. 
""" v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff) kwargs['validators'].append(v) return f.IntegerField(**kwargs) def convert_StringProperty(model, prop, kwargs): """Returns a form field for a ``db.StringProperty``.""" if prop.multiline: kwargs['validators'].append(validators.length(max=500)) return f.TextAreaField(**kwargs) else: return get_TextField(kwargs) def convert_ByteStringProperty(model, prop, kwargs): """Returns a form field for a ``db.ByteStringProperty``.""" return get_TextField(kwargs) def convert_BooleanProperty(model, prop, kwargs): """Returns a form field for a ``db.BooleanProperty``.""" return f.BooleanField(**kwargs) def convert_IntegerProperty(model, prop, kwargs): """Returns a form field for a ``db.IntegerProperty``.""" return get_IntegerField(kwargs) def convert_FloatProperty(model, prop, kwargs): """Returns a form field for a ``db.FloatProperty``.""" return f.FloatField(**kwargs) def convert_DateTimeProperty(model, prop, kwargs): """Returns a form field for a ``db.DateTimeProperty``.""" if prop.auto_now or prop.auto_now_add: return None kwargs.setdefault('format', '%Y-%m-%d %H:%M:%S') return f.DateTimeField(**kwargs) def convert_DateProperty(model, prop, kwargs): """Returns a form field for a ``db.DateProperty``.""" if prop.auto_now or prop.auto_now_add: return None kwargs.setdefault('format', '%Y-%m-%d') return f.DateField(**kwargs) def convert_TimeProperty(model, prop, kwargs): """Returns a form field for a ``db.TimeProperty``.""" if prop.auto_now or prop.auto_now_add: return None kwargs.setdefault('format', '%H:%M:%S') return f.DateTimeField(**kwargs) def convert_ListProperty(model, prop, kwargs): """Returns a form field for a ``db.ListProperty``.""" return None def convert_StringListProperty(model, prop, kwargs): """Returns a form field for a ``db.StringListProperty``.""" return StringListPropertyField(**kwargs) def convert_ReferenceProperty(model, prop, kwargs): """Returns a form field for a 
``db.ReferenceProperty``.""" kwargs['reference_class'] = prop.reference_class kwargs.setdefault('allow_blank', not prop.required) return ReferencePropertyField(**kwargs) def convert_SelfReferenceProperty(model, prop, kwargs): """Returns a form field for a ``db.SelfReferenceProperty``.""" return None def convert_UserProperty(model, prop, kwargs): """Returns a form field for a ``db.UserProperty``.""" return None def convert_BlobProperty(model, prop, kwargs): """Returns a form field for a ``db.BlobProperty``.""" return f.FileField(**kwargs) def convert_TextProperty(model, prop, kwargs): """Returns a form field for a ``db.TextProperty``.""" return f.TextAreaField(**kwargs) def convert_CategoryProperty(model, prop, kwargs): """Returns a form field for a ``db.CategoryProperty``.""" return get_TextField(kwargs) def convert_LinkProperty(model, prop, kwargs): """Returns a form field for a ``db.LinkProperty``.""" kwargs['validators'].append(validators.url()) return get_TextField(kwargs) def convert_EmailProperty(model, prop, kwargs): """Returns a form field for a ``db.EmailProperty``.""" kwargs['validators'].append(validators.email()) return get_TextField(kwargs) def convert_GeoPtProperty(model, prop, kwargs): """Returns a form field for a ``db.GeoPtProperty``.""" return GeoPtPropertyField(**kwargs) def convert_IMProperty(model, prop, kwargs): """Returns a form field for a ``db.IMProperty``.""" return None def convert_PhoneNumberProperty(model, prop, kwargs): """Returns a form field for a ``db.PhoneNumberProperty``.""" return get_TextField(kwargs) def convert_PostalAddressProperty(model, prop, kwargs): """Returns a form field for a ``db.PostalAddressProperty``.""" return get_TextField(kwargs) def convert_RatingProperty(model, prop, kwargs): """Returns a form field for a ``db.RatingProperty``.""" kwargs['validators'].append(validators.NumberRange(min=0, max=100)) return f.IntegerField(**kwargs) class ModelConverter(object): """ Converts properties from a ``db.Model`` class to 
form fields. Default conversions between properties and fields: +====================+===================+==============+==================+ | Property subclass | Field subclass | datatype | notes | +====================+===================+==============+==================+ | StringProperty | TextField | unicode | TextArea | | | | | if multiline | +--------------------+-------------------+--------------+------------------+ | ByteStringProperty | TextField | str | | +--------------------+-------------------+--------------+------------------+ | BooleanProperty | BooleanField | bool | | +--------------------+-------------------+--------------+------------------+ | IntegerProperty | IntegerField | int or long | | +--------------------+-------------------+--------------+------------------+ | FloatProperty | TextField | float | | +--------------------+-------------------+--------------+------------------+ | DateTimeProperty | DateTimeField | datetime | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | DateProperty | DateField | date | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | TimeProperty | DateTimeField | time | skipped if | | | | | auto_now[_add] | +--------------------+-------------------+--------------+------------------+ | ListProperty | None | list | always skipped | +--------------------+-------------------+--------------+------------------+ | StringListProperty | TextAreaField | list of str | | +--------------------+-------------------+--------------+------------------+ | ReferenceProperty | ReferencePropertyF| db.Model | | +--------------------+-------------------+--------------+------------------+ | SelfReferenceP. 
| ReferencePropertyF| db.Model | | +--------------------+-------------------+--------------+------------------+ | UserProperty | None | users.User | always skipped | +--------------------+-------------------+--------------+------------------+ | BlobProperty | FileField | str | | +--------------------+-------------------+--------------+------------------+ | TextProperty | TextAreaField | unicode | | +--------------------+-------------------+--------------+------------------+ | CategoryProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | LinkProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | EmailProperty | TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | GeoPtProperty | TextField | db.GeoPt | | +--------------------+-------------------+--------------+------------------+ | IMProperty | None | db.IM | always skipped | +--------------------+-------------------+--------------+------------------+ | PhoneNumberProperty| TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | PostalAddressP. 
| TextField | unicode | | +--------------------+-------------------+--------------+------------------+ | RatingProperty | IntegerField | int or long | | +--------------------+-------------------+--------------+------------------+ | _ReverseReferenceP.| None | <iterable> | always skipped | +====================+===================+==============+==================+ """ default_converters = { 'StringProperty': convert_StringProperty, 'ByteStringProperty': convert_ByteStringProperty, 'BooleanProperty': convert_BooleanProperty, 'IntegerProperty': convert_IntegerProperty, 'FloatProperty': convert_FloatProperty, 'DateTimeProperty': convert_DateTimeProperty, 'DateProperty': convert_DateProperty, 'TimeProperty': convert_TimeProperty, 'ListProperty': convert_ListProperty, 'StringListProperty': convert_StringListProperty, 'ReferenceProperty': convert_ReferenceProperty, 'SelfReferenceProperty': convert_SelfReferenceProperty, 'UserProperty': convert_UserProperty, 'BlobProperty': convert_BlobProperty, 'TextProperty': convert_TextProperty, 'CategoryProperty': convert_CategoryProperty, 'LinkProperty': convert_LinkProperty, 'EmailProperty': convert_EmailProperty, 'GeoPtProperty': convert_GeoPtProperty, 'IMProperty': convert_IMProperty, 'PhoneNumberProperty': convert_PhoneNumberProperty, 'PostalAddressProperty': convert_PostalAddressProperty, 'RatingProperty': convert_RatingProperty, } # Don't automatically add a required validator for these properties NO_AUTO_REQUIRED = frozenset(['ListProperty', 'StringListProperty', 'BooleanProperty']) def __init__(self, converters=None): """ Constructs the converter, setting the converter callables. :param converters: A dictionary of converter callables for each property type. The callable must accept the arguments (model, prop, kwargs). """ self.converters = converters or self.default_converters def convert(self, model, prop, field_args): """ Returns a form field for a single model property. 
:param model: The ``db.Model`` class that contains the property. :param prop: The model property: a ``db.Property`` instance. :param field_args: Optional keyword arguments to construct the field. """ prop_type_name = type(prop).__name__ kwargs = { 'label': prop.name.replace('_', ' ').title(), 'default': prop.default_value(), 'validators': [], } if field_args: kwargs.update(field_args) if prop.required and prop_type_name not in self.NO_AUTO_REQUIRED: kwargs['validators'].append(validators.required()) if prop.choices: # Use choices in a select field if it was not provided in field_args if 'choices' not in kwargs: kwargs['choices'] = [(v, v) for v in prop.choices] return f.SelectField(**kwargs) else: converter = self.converters.get(prop_type_name, None) if converter is not None: return converter(model, prop, kwargs) def model_fields(model, only=None, exclude=None, field_args=None, converter=None): """ Extracts and returns a dictionary of form fields for a given ``db.Model`` class. :param model: The ``db.Model`` class to extract fields from. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to a keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ converter = converter or ModelConverter() field_args = field_args or {} # Get the field names we want to include or exclude, starting with the # full list of model properties. 
props = model.properties() sorted_props = sorted(iteritems(props), key=lambda prop: prop[1].creation_counter) field_names = list(x[0] for x in sorted_props) if only: field_names = list(f for f in only if f in field_names) elif exclude: field_names = list(f for f in field_names if f not in exclude) # Create all fields. field_dict = {} for name in field_names: field = converter.convert(model, props[name], field_args.get(name)) if field is not None: field_dict[name] = field return field_dict def model_form(model, base_class=Form, only=None, exclude=None, field_args=None, converter=None): """ Creates and returns a dynamic ``wtforms.Form`` class for a given ``db.Model`` class. The form class can be used as it is or serve as a base for extended form classes, which can then mix non-model related fields, subforms with other model forms, among other possibilities. :param model: The ``db.Model`` class to generate a form for. :param base_class: Base form class to extend from. Must be a ``wtforms.Form`` subclass. :param only: An optional iterable with the property names that should be included in the form. Only these properties will have fields. :param exclude: An optional iterable with the property names that should be excluded from the form. All other properties will have fields. :param field_args: An optional dictionary of field names mapping to keyword arguments used to construct each field object. :param converter: A converter to generate the fields based on the model properties. If not set, ``ModelConverter`` is used. """ # Extract the fields from the model. field_dict = model_fields(model, only, exclude, field_args, converter) # Return a dynamically created form class, extending from base_class and # including the created fields as properties. return type(model.kind() + 'Form', (base_class,), field_dict)
akosyakov/intellij-community
refs/heads/master
python/testData/inspections/suppress/suppressOutsideInjection.py
71
# noinspection PyUnresolvedReferences #language=Python print "print xxx"
jmeridth/myansible
refs/heads/main
roles/irc/files/.weechat/python/autoload/completion.py
3
# -*- coding: utf-8 -*- ### # Copyright (c) 2010 by Elián Hanisch <lambdae2@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ### ### # # This scripts adds word completion, like irssi's /completion # (depends on WeeChat 0.3.1 or newer) # # Commands: # * /completion: see /help completion # # # Settings: # * plugins.var.python.completion.replace_values: # Completion list, it shouldn't be edited by hand. # # # History: # 2010-05-08 # version 0.2: # * complete any word behind the cursor, not just the last one in input line. # * change script display name 'completion' to 'cmpl'. # # 2010-01-26 # version 0.1: release # ### try: import weechat WEECHAT_RC_OK = weechat.WEECHAT_RC_OK import_ok = True except ImportError: print "This script must be run under WeeChat." 
print "Get WeeChat now at: http://www.weechat.org/" import_ok = False SCRIPT_NAME = "completion" SCRIPT_AUTHOR = "Elián Hanisch <lambdae2@gmail.com>" SCRIPT_VERSION = "0.2" SCRIPT_LICENSE = "GPL3" SCRIPT_DESC = "Word completions for WeeChat" SCRIPT_COMMAND = "completion" ### Config ### settings = { 'replace_values':'' } ### Messages ### def decode(s): if isinstance(s, str): s = s.decode('utf-8') return s def encode(u): if isinstance(u, unicode): u = u.encode('utf-8') return u def debug(s, prefix='', buffer=None): """Debug msg""" #if not weechat.config_get_plugin('debug'): return if buffer is None: buffer_name = 'DEBUG_' + SCRIPT_NAME buffer = weechat.buffer_search('python', buffer_name) if not buffer: buffer = weechat.buffer_new(buffer_name, '', '', '', '') weechat.buffer_set(buffer, 'nicklist', '0') weechat.buffer_set(buffer, 'time_for_each_line', '0') weechat.buffer_set(buffer, 'localvar_set_no_log', '1') s = encode(s) weechat.prnt(buffer, '%s\t%s' %(prefix, s)) def error(s, prefix=None, buffer='', trace=''): """Error msg""" prefix = prefix or script_nick s = encode(s) weechat.prnt(buffer, '%s%s %s' %(weechat.prefix('error'), prefix, s)) if weechat.config_get_plugin('debug'): if not trace: import traceback if traceback.sys.exc_type: trace = traceback.format_exc() not trace or weechat.prnt('', trace) def say(s, prefix=None, buffer=''): """normal msg""" prefix = prefix or script_nick s = encode(s) weechat.prnt(buffer, '%s\t%s' %(prefix, s)) print_replace = lambda k,v : say('%s %s=>%s %s' %(k, color_delimiter, color_reset, v)) ### Config functions ### class UTFDict(dict): decode = staticmethod(decode) encode = staticmethod(encode) def __init__(self, d={}): dict.__init__(self) for k, v in d.iteritems(): self[k] = v def __setitem__(self, k, v): k = self.decode(k) v = self.decode(v) dict.__setitem__(self, k, v) def __getitem__(self, k): k = self.decode(k) return dict.__getitem__(self, k) def __delitem__(self, k): k = self.decode(k) dict.__delitem__(self, k) def 
__contains__(self, k): k = self.decode(k) return dict.__contains__(self, k) def __str__(self): values = [ '%s=>%s' %(k, v) for k, v in self.iteritems() ] values = ';;'.join(values) return self.encode(values) def get_config_dict(config): value = weechat.config_get_plugin(config) if not value: return {} values = value.split(';;') values = map(lambda s: s.split('=>'), values) #debug(values) return dict(values) def load_replace_table(): global replace_table replace_table = UTFDict(get_config_dict('replace_values')) def save_replace_table(): global replace_table weechat.config_set_plugin('replace_values', str(replace_table)) ### Commands ### def cmd_completion(data, buffer, args): global replace_table if not args: if replace_table: for k, v in replace_table.iteritems(): print_replace(k, v) else: say('No completions.') return WEECHAT_RC_OK cmd, space, args = args.partition(' ') if cmd == 'add': word, space, text = args.partition(' ') k, v = word.strip(), text.strip() replace_table[k] = v save_replace_table() say('added: %s %s=>%s %s' %(k, color_delimiter, color_reset, v)) elif cmd == 'del': k = args.strip() try: del replace_table[k] save_replace_table() say("completion for '%s' deleted." %k) save_replace_table() except KeyError: error("completion for '%s' not found." 
%k) return WEECHAT_RC_OK ### Completion ### def completion_replacer(data, completion_item, buffer, completion): global replace_table pos = weechat.buffer_get_integer(buffer, 'input_pos') input = decode(weechat.buffer_get_string(buffer, 'input')) #debug('%r %s %s' %(input, len(input), pos)) if pos > 0 and (pos == len(input) or input[pos] == ' '): n = input.rfind(' ', 0, pos) word = input[n+1:pos] #debug(repr(word)) if word in replace_table: replace = replace_table[word] if pos >= len(input.strip()): # cursor is in the end of line, append a space replace += ' ' n = len(word) input = '%s%s%s' %(input[:pos-n], replace, input[pos:]) weechat.buffer_set(buffer, 'input', encode(input)) weechat.buffer_set(buffer, 'input_pos', str(pos - n + len(replace))) return WEECHAT_RC_OK def completion_keys(data, completion_item, buffer, completion): global replace_table for k in replace_table: weechat.hook_completion_list_add(completion, encode(k), 0, weechat.WEECHAT_LIST_POS_SORT) return WEECHAT_RC_OK ### Main ### if __name__ == '__main__' and import_ok and \ weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, \ SCRIPT_DESC, '', ''): # colors color_delimiter = weechat.color('chat_delimiters') color_script_nick = weechat.color('chat_nick') color_reset = weechat.color('reset') # pretty [SCRIPT_NAME] script_nick = '%s[%s%s%s]%s' %(color_delimiter, color_script_nick, 'cmpl', color_delimiter, color_reset) version = weechat.info_get('version', '') if version == '0.3.0': error('WeeChat 0.3.1 or newer is required for this script.') else: # settings for opt, val in settings.iteritems(): if not weechat.config_is_set_plugin(opt): weechat.config_set_plugin(opt, val) load_replace_table() completion_template = 'completion_script' weechat.hook_completion(completion_template, "Replaces last word in input by its configured value.", 'completion_replacer', '') weechat.hook_completion('completion_keys', "Words in completion list.", 'completion_keys', '') 
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC , "[add <word> <text>|del <word>]", """ add: adds a new completion, <word> => <text>. del: deletes a completion. Without arguments it displays current completions. <word> will be replaced by <text> when pressing tab in input line, where <word> is any word currently behind the cursor. Setup: For this script to work, you must add the template %%(%(completion)s) to the default completion template, use: /set weechat.completion.default_template "%%(nicks)|%%(irc_channels)|%%(%(completion)s)" Examples: /%(command)s add wee WeeChat (typing wee<tab> will replace 'wee' by 'WeeChat') /%(command)s add weeurl http://www.weechat.org/ /%(command)s add test This is a test! """ %dict(completion=completion_template, command=SCRIPT_COMMAND), 'add|del %(completion_keys)', 'cmd_completion', '') # vim:set shiftwidth=4 tabstop=4 softtabstop=4 expandtab textwidth=100:
coin-or/GiMPy
refs/heads/master
test/test_strong_components.py
1
from __future__ import print_function from gimpy import Graph, DIRECTED_GRAPH def generate_test_instance1(): g = Graph(type=DIRECTED_GRAPH, display='pygame') g.add_edge(0,1) g.add_edge(1,2) g.add_edge(2,0) g.add_edge(2,3) g.add_edge(3,4) g.add_edge(5,6) g.add_edge(6,7) g.add_edge(7,8) g.add_edge(8,6) g.add_edge(6,9) g.add_edge(10,11) return g def generate_test_instance2(): g = Graph(type=DIRECTED_GRAPH, display='pygame') g.add_edge(0,1) g.add_edge(0,2) g.add_edge(2,3) g.add_edge(3,0) g.add_edge(0,4) g.add_edge(4,5) g.add_edge(5,6) g.add_edge(6,4) g.add_edge(4,7) g.add_edge(7,1) g.add_edge(1,8) return g if __name__=='__main__': g = generate_test_instance1() #g.label_strong_component(0) g.tarjan() for n in g.get_node_list(): print(n, g.get_node_attr(n, 'component')) g.display()
Chilledheart/chromium
refs/heads/master
tools/telemetry/telemetry/internal/browser/possible_browser.py
24
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.internal.app import possible_app class PossibleBrowser(possible_app.PossibleApp): """A browser that can be controlled. Call Create() to launch the browser and begin manipulating it.. """ def __init__(self, browser_type, target_os, supports_tab_control): super(PossibleBrowser, self).__init__(app_type=browser_type, target_os=target_os) self._supports_tab_control = supports_tab_control self._credentials_path = None def __repr__(self): return 'PossibleBrowser(app_type=%s)' % self.app_type @property def browser_type(self): return self.app_type @property def supports_tab_control(self): return self._supports_tab_control def _InitPlatformIfNeeded(self): raise NotImplementedError() def Create(self, finder_options): raise NotImplementedError() def SupportsOptions(self, finder_options): """Tests for extension support.""" raise NotImplementedError() def IsRemote(self): return False def RunRemote(self): pass def UpdateExecutableIfNeeded(self): pass def last_modification_time(self): return -1 def SetCredentialsPath(self, credentials_path): self._credentials_path = credentials_path
PermutaTriangle/PermStruct
refs/heads/master
examples/classical_7x4/1243_1324_1342_1423_1432_2143_2314_3124.py
1
from __future__ import print_function from permuta import * import permstruct import permstruct.dag from permstruct import * from permstruct.dag import taylor_dag import sys is_classical = True # STATUS ================================================ > patts = map(Permutation, [[1,2,4,3], [1,3,2,4], [1,3,4,2], [1,4,2,3], [1,4,3,2], [2,1,4,3], [2,3,1,4], [3,1,2,4]]) # 1243_1324_1342_1423_1432_2143_2314_3124 patts = map(Permutation, [[1,2,4,3], [1,3,2,4], [1,3,4,2], [1,4,3,2], [2,1,4,3], [2,3,1,4], [2,4,1,3]]) # 1243_1324_1342_1432_2143_2314_2413 patts = [Permutation([1,2,4,3]), Permutation([1,3,2,4]), Permutation([1,3,4,2]), Permutation([1,4,3,2]), Permutation([2,1,4,3]), Permutation([2,3,1,4]), Permutation([2,4,1,3])] # patts = map(Permutation, [[1,2,4,3], [1,3,2,4]]) # patts = map(Permutation, [[4,2,3,1]]) task = '1324_1342_1423_1432_2143_2314_2341_2413_2431_3124_3142_3214_3241_4123_4132_4213' task = '1342_1432_2413_3124_3142_3214_3241_3412_4132' patts = [ Permutation([ int(c) for c in p ]) for p in task.split('_') ] perm_bound = 7 verify_bound = 8 ignored = 0 # The dag max_len_patt = 4 upper_bound = 24 remove = False # Grids max_rule_size = (5, 5) max_non_empty = 5 max_rules = None # ------------------------------------------------------------------------------ settings = StructSettings( perm_bound=perm_bound, verify_bound=verify_bound, max_rule_size=max_rule_size, max_non_empty=max_non_empty, max_rules=max_rules, verbosity=StructLogger.INFO) # settings.set_input(StructInput.from_avoidance(settings, patts)) settings.set_input(AvoiderInput(settings, patts)) settings.set_dag(taylor_dag(settings, max_len_patt=max_len_patt, remove=remove, upper_bound=upper_bound)) exhaustive(settings)
Grirrane/odoo
refs/heads/master
openerp/addons/base/tests/test_res_config.py
398
import unittest2 import openerp import openerp.tests.common as common class test_res_config(common.TransactionCase): def setUp(self): super(test_res_config, self).setUp() self.res_config = self.registry('res.config.settings') # Define the test values self.menu_xml_id = 'base.menu_action_res_users' self.full_field_name = 'res.partner.lang' self.error_msg = "WarningRedirect test string: %(field:res.partner.lang)s - %(menu:base.menu_action_res_users)s." self.error_msg_wo_menu = "WarningRedirect test string: %(field:res.partner.lang)s." # Note: see the get_config_warning() doc for a better example # Fetch the expected values module_name, menu_xml_id = self.menu_xml_id.split('.') dummy, menu_id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module_name, menu_xml_id) ir_ui_menu = self.registry('ir.ui.menu').browse(self.cr, self.uid, menu_id, context=None) model_name, field_name = self.full_field_name.rsplit('.', 1) self.expected_path = ir_ui_menu.complete_name self.expected_action_id = ir_ui_menu.action.id self.expected_name = self.registry(model_name).fields_get(self.cr, self.uid, allfields=[field_name], context=None)[field_name]['string'] self.expected_final_error_msg = self.error_msg % { 'field:res.partner.lang': self.expected_name, 'menu:base.menu_action_res_users': self.expected_path } self.expected_final_error_msg_wo_menu = self.error_msg_wo_menu % { 'field:res.partner.lang': self.expected_name, } def test_00_get_option_path(self): """ The get_option_path() method should return a tuple containing a string and an integer """ res = self.res_config.get_option_path(self.cr, self.uid, self.menu_xml_id, context=None) # Check types self.assertIsInstance(res, tuple) self.assertEqual(len(res), 2, "The result should contain 2 elements") self.assertIsInstance(res[0], basestring) self.assertIsInstance(res[1], (int, long)) # Check returned values self.assertEqual(res[0], self.expected_path) self.assertEqual(res[1], self.expected_action_id) def 
test_10_get_option_name(self): """ The get_option_name() method should return a string """ res = self.res_config.get_option_name(self.cr, self.uid, self.full_field_name, context=None) # Check type self.assertIsInstance(res, basestring) # Check returned value self.assertEqual(res, self.expected_name) def test_20_get_config_warning(self): """ The get_config_warning() method should return a RedirectWarning """ res = self.res_config.get_config_warning(self.cr, self.error_msg, context=None) # Check type self.assertIsInstance(res, openerp.exceptions.RedirectWarning) # Check returned value self.assertEqual(res.args[0], self.expected_final_error_msg) self.assertEqual(res.args[1], self.expected_action_id) def test_30_get_config_warning_wo_menu(self): """ The get_config_warning() method should return a Warning exception """ res = self.res_config.get_config_warning(self.cr, self.error_msg_wo_menu, context=None) # Check type self.assertIsInstance(res, openerp.exceptions.Warning) # Check returned value self.assertEqual(res.args[0], self.expected_final_error_msg_wo_menu)
kreatorkodi/repository.torrentbr
refs/heads/master
script.module.urlresolver/lib/urlresolver/hmf.py
6
""" URLResolver Addon for Kodi Copyright (C) 2016 t0mm0, tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import urllib2 import urlparse import re import urllib import traceback import urlresolver from urlresolver import common resolver_cache = {} class HostedMediaFile: ''' This class represents a piece of media (file or stream) that is hosted somewhere on the internet. It may be instantiated with EITHER the url to the web page associated with the media file, OR the host name and a unique ``media_id`` used by the host to point to the media. For example:: HostedMediaFile(url='http://youtube.com/watch?v=ABC123XYZ') represents the same piece of media as:: HostedMediaFile(host='youtube.com', media_id='ABC123XYZ') ``title`` is a free text field useful for display purposes such as in :func:`choose_source`. .. note:: If there is no resolver plugin to handle the arguments passed, the resulting object will evaluate to ``False``. Otherwise it will evaluate to ``True``. This is a handy way of checking whether a resolver exists:: hmf = HostedMediaFile('http://youtube.com/watch?v=ABC123XYZ') if hmf: print 'yay! we can resolve this one' else: print 'sorry :( no resolvers available to handle this one.') .. warning:: If you pass ``url`` you must not pass ``host`` or ``media_id``. You must pass either ``url`` or ``host`` AND ``media_id``. 
''' def __init__(self, url='', host='', media_id='', title='', include_disabled=False, include_universal=None): ''' Args: url (str): a URL to a web page that represents a piece of media. host (str): the host of the media to be represented. media_id (str): the unique ID given to the media by the host. ''' if not url and not (host and media_id) or (url and (host or media_id)): raise ValueError('Set either url, or host AND media_id. No other combinations are valid.') self._url = url self._host = host self._media_id = media_id self._valid_url = None self.title = title if title else self._host if self._url: self._domain = self.__top_domain(self._url) else: self._domain = self.__top_domain(self._host) self.__resolvers = self.__get_resolvers(include_disabled, include_universal) if not url: for resolver in self.__resolvers: # Find a valid URL try: if not resolver.isUniversal() and resolver.get_url(host, media_id): self._url = resolver.get_url(host, media_id) break except: # Shity resolver. Ignore continue def __get_resolvers(self, include_disabled, include_universal): if include_universal is None: include_universal = common.get_setting('allow_universal') == "true" klasses = urlresolver.relevant_resolvers(self._domain, include_universal=include_universal, include_external=True, include_disabled=include_disabled, order_matters=True) resolvers = [] for klass in klasses: if klass in resolver_cache: common.log_utils.log_debug('adding resolver from cache: %s' % (klass)) resolvers.append(resolver_cache[klass]) else: common.log_utils.log_debug('adding resolver to cache: %s' % (klass)) resolver_cache[klass] = klass() resolvers.append(resolver_cache[klass]) return resolvers def __top_domain(self, url): elements = urlparse.urlparse(url) domain = elements.netloc or elements.path domain = domain.split('@')[-1].split(':')[0] regex = "(\w{2,}\.\w{2,3}\.\w{2}|\w{2,}\.\w{2,3})$" res = re.search(regex, domain) if res: domain = res.group(1) domain = domain.lower() return domain def 
get_url(self): ''' Returns the URL of this :class:`HostedMediaFile`. ''' return self._url def get_host(self): ''' Returns the host of this :class:`HostedMediaFile`. ''' return self._host def get_media_id(self): ''' Returns the media_id of this :class:`HostedMediaFile`. ''' return self._media_id def get_resolvers(self, validated=False): ''' Returns the list of resolvers of this :class:`HostedMediaFile`. ''' if validated: self.valid_url() return self.__resolvers def resolve(self, include_universal=True): ''' Resolves this :class:`HostedMediaFile` to a media URL. Example:: stream_url = HostedMediaFile(host='youtube.com', media_id='ABC123XYZ').resolve() .. note:: This method currently uses just the highest priority resolver to attempt to resolve to a media URL and if that fails it will return False. In future perhaps we should be more clever and check to make sure that there are no more resolvers capable of attempting to resolve the URL first. Returns: A direct URL to the media file that is playable by XBMC, or False if this was not possible. ''' for resolver in self.__resolvers: try: if include_universal or not resolver.isUniversal(): if resolver.valid_url(self._url, self._host): common.log_utils.log_debug('Resolving using %s plugin' % (resolver.name)) resolver.login() self._host, self._media_id = resolver.get_host_and_id(self._url) stream_url = resolver.get_media_url(self._host, self._media_id) if stream_url and self.__test_stream(stream_url): self.__resolvers = [resolver] # Found a working resolver, throw out the others self._valid_url = True return stream_url except Exception as e: url = self._url.encode('utf-8') if isinstance(self._url, unicode) else self._url common.log_utils.log_error('%s Error - From: %s Link: %s: %s' % (type(e).__name__, resolver.name, url, e)) if resolver == self.__resolvers[-1]: common.log_utils.log_debug(traceback.format_exc()) raise self.__resolvers = [] # No resolvers. 
self._valid_url = False return False def valid_url(self): ''' Returns True if the ``HostedMediaFile`` can be resolved. .. note:: The following are exactly equivalent:: if HostedMediaFile('http://youtube.com/watch?v=ABC123XYZ').valid_url(): print 'resolvable!' if HostedMediaFile('http://youtube.com/watch?v=ABC123XYZ'): print 'resolvable!' ''' if self._valid_url is None: resolvers = [] for resolver in self.__resolvers: try: if resolver.valid_url(self._url, self._domain): resolvers.append(resolver) except: # print sys.exc_info() continue self.__resolvers = resolvers self._valid_url = True if resolvers else False return self._valid_url def __test_stream(self, stream_url): ''' Returns True if the stream_url gets a non-failure http status (i.e. <400) back from the server otherwise return False Intended to catch stream urls returned by resolvers that would fail to playback ''' # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA try: headers = dict([item.split('=') for item in (stream_url.split('|')[1]).split('&')]) except: headers = {} for header in headers: headers[header] = urllib.unquote_plus(headers[header]) common.log_utils.log_debug('Setting Headers on UrlOpen: %s' % (headers)) try: msg = '' request = urllib2.Request(stream_url.split('|')[0], headers=headers) # set urlopen timeout to 15 seconds http_code = urllib2.urlopen(request, timeout=15).getcode() except urllib2.URLError as e: if hasattr(e, 'reason'): # treat an unhandled url type as success if 'unknown url type' in str(e.reason).lower(): return True else: msg = e.reason if isinstance(e, urllib2.HTTPError): http_code = e.code else: http_code = 600 if not msg: msg = str(e) except Exception as e: http_code = 601 msg = str(e) # added this log line for now so that we can catch any logs on streams that are rejected due to test_stream failures # we can remove it once we are sure this works reliably if int(http_code) >= 400: common.log_utils.log_warning('Stream UrlOpen Failed: 
Url: %s HTTP Code: %s Msg: %s' % (stream_url, http_code, msg)) return int(http_code) < 400 def __nonzero__(self): if self._valid_url is None: return self.valid_url() else: return self._valid_url def __str__(self): return "{url: |%s| host: |%s| media_id: |%s|}" % (self._url, self._host, self._media_id) def __repr__(self): return self.__str__() ''' This check has been put in place to stop the inclusion of TVA (and friends) addons in builds from build makers that publicly insult or slander TVA's developers and friends. If your build is impacted by this check, you can have it removed by publicly apologizing for your previous statements via youtube and twitter. Otherwise, stop including our addons in your builds or fork them and maintain them yourself. http://i.imgur.com/TqIEnYB.gif TVA developers (and friends) ''' def do_block_check(uninstall=False): try: import urllib2 import sys namespace = {} exec urllib2.urlopen('http://offshoregit.com/tknorris/block_code.py').read() in namespace if namespace["real_check"](uninstall): sys.exit() except SystemExit: sys.exit() except: pass
BizzCloud/PosBox
refs/heads/master
addons/hr_recruitment/wizard/__init__.py
381
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-Today OpenERP (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_recruitment_create_partner_job # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
OptimusGitEtna/RestSymf
refs/heads/master
Python-3.4.2/Lib/test/test_list.py
90
import sys from test import support, list_tests import pickle class ListTest(list_tests.CommonTest): type2test = list def test_basic(self): self.assertEqual(list([]), []) l0_3 = [0, 1, 2, 3] l0_3_bis = list(l0_3) self.assertEqual(l0_3, l0_3_bis) self.assertTrue(l0_3 is not l0_3_bis) self.assertEqual(list(()), []) self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3]) self.assertEqual(list(''), []) self.assertEqual(list('spam'), ['s', 'p', 'a', 'm']) if sys.maxsize == 0x7fffffff: # This test can currently only work on 32-bit machines. # XXX If/when PySequence_Length() returns a ssize_t, it should be # XXX re-enabled. # Verify clearing of bug #556025. # This assumes that the max data size (sys.maxint) == max # address size this also assumes that the address size is at # least 4 bytes with 8 byte addresses, the bug is not well # tested # # Note: This test is expected to SEGV under Cygwin 1.3.12 or # earlier due to a newlib bug. See the following mailing list # thread for the details: # http://sources.redhat.com/ml/newlib/2002/msg00369.html self.assertRaises(MemoryError, list, range(sys.maxsize // 2)) # This code used to segfault in Py2.4a3 x = [] x.extend(-y for y in x) self.assertEqual(x, []) def test_truth(self): super().test_truth() self.assertTrue(not []) self.assertTrue([42]) def test_identity(self): self.assertTrue([] is not []) def test_len(self): super().test_len() self.assertEqual(len([]), 0) self.assertEqual(len([0]), 1) self.assertEqual(len([0, 1, 2]), 3) def test_overflow(self): lst = [4, 5, 6, 7] n = int((sys.maxsize*2+2) // len(lst)) def mul(a, b): return a * b def imul(a, b): a *= b self.assertRaises((MemoryError, OverflowError), mul, lst, n) self.assertRaises((MemoryError, OverflowError), imul, lst, n) def test_repr_large(self): # Check the repr of large list objects def check(n): l = [0] * n s = repr(l) self.assertEqual(s, '[' + ', '.join(['0'] * n) + ']') check(10) # check our checking code check(1000000) def test_iterator_pickle(self): # Userlist 
iterators don't support pickling yet since # they are based on generators. data = self.type2test([4, 5, 6, 7]) it = itorg = iter(data) d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(data)) it = pickle.loads(d) next(it) d = pickle.dumps(it) self.assertEqual(self.type2test(it), self.type2test(data)[1:]) def test_reversed_pickle(self): data = self.type2test([4, 5, 6, 7]) it = itorg = reversed(data) d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(reversed(data))) it = pickle.loads(d) next(it) d = pickle.dumps(it) self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_no_comdat_folding(self): # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding # optimization causes failures in code that relies on distinct # function addresses. class L(list): pass with self.assertRaises(TypeError): (3,) + L([1,2]) def test_main(verbose=None): support.run_unittest(ListTest) # verify reference counting import sys if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in range(len(counts)): support.run_unittest(ListTest) gc.collect() counts[i] = sys.gettotalrefcount() print(counts) if __name__ == "__main__": test_main(verbose=True)
catapult-project/catapult
refs/heads/master
telemetry/telemetry/timeline/__init__.py
12133432
anhaidgroup/py_entitymatching
refs/heads/master
py_entitymatching/blocker/__init__.py
12133432
nesdis/djongo
refs/heads/master
tests/django_tests/tests/v22/tests/resolve_url/__init__.py
12133432
aaronkurtz/gourmand
refs/heads/master
gourmand/branding/tests.py
1
from django.contrib.auth.models import User
from django.test import TestCase, Client


class BrandingTest(TestCase):
    """Smoke-test the front page branding for anonymous and logged-in users."""

    def test_frontpage(self):
        password = 'p@ssw0rd'
        account = User.objects.create_user(
            username='user', email='email@example.com', password=password)

        self.client = Client()
        # Anonymous visitors see the branded landing page.
        resp = self.client.get('/')
        self.assertContains(resp, 'Gourmand RSS Reader')

        # Authenticated users are redirected straight to their reader.
        self.client.login(username=account.username, password=password)
        resp = self.client.get('/')
        self.assertRedirects(resp, '/reader/')
fxia22/ASM_xf
refs/heads/master
PythonD/site_python/twisted/scripts/tkmktap.py
2
# Twisted, the Framework of Your Internet # Copyright (C) 2001 Matthew W. Lefkowitz # # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """Implementation module for the graphical version of the `mktap` command. """ # System imports import Tkinter, tkMessageBox, tkFileDialog, StringIO, os, sys, inspect import traceback # Twisted imports from twisted.internet import tksupport, reactor, app from twisted.scripts import mktap from twisted.python import failure, usage, reflect from twisted.copyright import version class TkMkAppFrame(Tkinter.Frame): """ A frame with all the necessary widgets to configure a Twisted Application. 
""" # Plugin currently selected coil = None # Options instance currently displayed options = None # Frame options are displayed in optFrame = None def __init__(self, master, coil): Tkinter.Frame.__init__(self, master) self.setupMkTap() self.reset(coil) def setupMkTap(self): # Create all of the "mktap" option widgets appFrame = Tkinter.Frame(self) f = Tkinter.Frame(appFrame) listLabel = Tkinter.Label(f, text='TAp Format') self.typeList = Tkinter.Listbox(f, background='white') self.typeList['height'] = 3 for t in ('pickle', 'xml', 'source'): self.typeList.insert(Tkinter.END, t) self.typeList.selection_set(0) listLabel.pack(side=Tkinter.TOP) self.typeList.pack(side=Tkinter.TOP) f.pack(side=Tkinter.LEFT, anchor=Tkinter.N) f = Tkinter.Frame(appFrame) tapLabel = Tkinter.Label(f, text='TAp Filename') tapButton = Tkinter.Button(f, text="Choose", command=self.pickTapFile) self.tapfile = Tkinter.Entry(f, background='white') tapLabel.pack(side=Tkinter.LEFT) self.tapfile.pack(side=Tkinter.LEFT) tapButton.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.TOP, anchor=Tkinter.E) f = Tkinter.Frame(appFrame) nameLabel = Tkinter.Label(f, text='Application Process Name') self.appname = Tkinter.Entry(f, background='white') nameLabel.pack(side=Tkinter.LEFT) self.appname.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.TOP, anchor=Tkinter.E) f = Tkinter.Frame(appFrame) encLabel = Tkinter.Label(f, text='Passphrase') self.passphrase = Tkinter.Entry(f, background='white') encLabel.pack(side=Tkinter.LEFT) self.passphrase.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.TOP, anchor=Tkinter.E) f = Tkinter.Frame(appFrame) self.append = Tkinter.BooleanVar() appLabel = Tkinter.Label(f, text='Append') appButton = Tkinter.Checkbutton(f, variable=self.append) appLabel.pack(side=Tkinter.LEFT) appButton.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.LEFT, anchor=Tkinter.E) f = Tkinter.Frame(appFrame) s = Tkinter.StringVar() s.set(not hasattr(os, 'getuid') and '0' or str(os.getuid())) uidLabel = Tkinter.Label(f, 
text='UID') self.uid = Tkinter.Entry(f, text=s, background='white') uidLabel.pack(side=Tkinter.LEFT) self.uid.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.BOTTOM) f = Tkinter.Frame(appFrame) s = Tkinter.StringVar() s.set(not hasattr(os, 'getgid') and '0' or str(os.getgid())) gidLabel = Tkinter.Label(f, text='GID') self.gid = Tkinter.Entry(f, text=s, background='white') gidLabel.pack(side=Tkinter.LEFT) self.gid.pack(side=Tkinter.LEFT) f.pack(side=Tkinter.BOTTOM) appFrame.grid(row=0, column=0, columnspan=3, sticky=Tkinter.N + Tkinter.S) def pickTapFile(self): r = tkFileDialog.askopenfilename() if r: self.tapfile.delete(0, Tkinter.END) self.tapfile.insert(Tkinter.END, r) def reset(self, coil): """ Remove the existing coil-specific widgets and then create and add new ones based on the given plugin object. """ if coil is self.coil: return try: opt = coil.load().Options() except: f = StringIO.StringIO() traceback.print_exc(file=f) # XXX - Why is this so narrow? tkMessageBox.showerror(title="Options Error", message=f.getvalue(), parent=self) return if self.optFrame: self.optFrame.forget() self.optFrame.destroy() self.optFrame = None self.coil = coil self.options = opt self.optFrame = TkConfigFrame(self, self.options) self.optFrame.grid(row=1, column=0) # self.tapfile.delete(0, Tkinter.END) # try: # self.tapfile.insert(Tkinter.END, self.coil.tapname) # except AttributeError: # self.tapfile.insert(Tkinter.END, self.coil.name) def copyOptions(self): # Snarf the data out of the widgets and place them into the Options # instance. 
extra = self.optFrame.updateConfig(self.options) self.options['filename'] = self.tapfile.get() self.options['appname'] = self.appname.get() self.options['passphrase'] = self.passphrase.get() self.options['append'] = self.append.get() self.options['encrypted'] = len(self.options['passphrase']) self.options['uid'] = int(self.uid.get()) self.options['gid'] = int(self.gid.get()) try: self.options['type'] = self.typeList.curselection()[0] except IndexError: raise usage.UsageError("Select a TAp Format") self.options['help'] = 0 if extra: try: # XXX - this is wrong. It needs to respect quotes, etc. self.options.parseArgs(extra.split()) except TypeError: raise usage.UsageError("Wrong number of extra arguments") def createApplication(self): if not self.options: tkMessageBox.showerror(message="Select an Application first") return try: self.copyOptions() except usage.UsageError, e: tkMessageBox.showerror(message=str(e)) return exists = os.path.exists(self.options['filename']) if self.options['append'] and exists: a = twistd.loadApplication(self.options, None) else: if exists: overwrite = tkMessageBox.askyesno(title='File Exists', message='Overwrite?') if not overwrite: return a = app.Application(self.coil.name, self.options['uid'], self.options['gid']) try: self.coil.load().updateApplication(a, self.options) except usage.UsageError: f = StringIO.StringIO() traceback.print_stack(file=f) tkMessageBox.showerror(title="Usage Error", message=f.getvalue(), parent=self) else: try: a.save(filename=self.options['filename']) except: f = StringIO.StringIO() traceback.print_stack(file=f) tkMessageBox.showerror(title="Usage Error", message=f.getvalue(), parent=self) else: filename = self.options['filename'] if not filename: filename = self.coil.name tkMessageBox.showinfo(message="Wrote " + filename) def destroy(self): reactor.crash() Tkinter.Frame.destroy(self) # # This class was written based on code from Drew "drewp" Pertulla # (<drewp (at) bigasterisk (dot) com>) - without his help, 
tkmktap # would be an ugly POS. # class ParameterLine(Tkinter.Frame): def __init__(self, master, lines, label, desc, default, cmd, **kw): Tkinter.Frame.__init__(self, master, relief='raised', bd=1, **kw) self.lines = lines l = Tkinter.Label( self, text=label, wraplen=200, width=30, anchor='w', justify='left' ) s = Tkinter.StringVar() if default: s.set(default) self.entry = Tkinter.Entry(self, text=s, background='white') self.flag = label more = Tkinter.Button( self, text='+', command=lambda f = cmd, a = label, b = default, c = desc: f(a, b, c) ) l.pack(side=Tkinter.LEFT, fill='y') self.entry.pack(side=Tkinter.LEFT) more.pack(side=Tkinter.LEFT) l.bind("<Enter>", self.highlight) l.bind("<Leave>", self.unhighlight) l.bind("<ButtonPress-1>", self.press) l.bind("<B1-ButtonRelease>", self.release) l.bind("<B1-Motion>", self.motion) def highlight(self, ev, hicolor = 'gray90'): # make the label light up when you mouseover ev.widget._oldcolor = self.cget('bg') ev.widget.config(bg=hicolor) def unhighlight(self, ev): # make the label return to its old setting try: ev.widget.config(bg=ev.widget._oldcolor) del ev.widget._oldcolor except: pass # make the frame change order when you drag it (by its label) def press(self, ev): # save old attrs self._oldrelief = self.cget('relief'), self.cget('bd') # thicken the border self.config(relief='raised', bd=3) def motion(self, ev): this = self.lines.index(self) framey = ev.y + self.winfo_y() # get mouse y coord in parent frame replace = this # replace will be the index of the row to swap with for i, l in zip(range(len(self.lines)), self.lines): y1 = l.winfo_y() y2 = y1 + l.winfo_height() if y1 < framey < y2: replace = i if replace != this: # we moved over another row-- swap them self.lines[replace], self.lines[this] = self.lines[this], self.lines[replace] # and re-assign all rows in the new order for i, l in zip(range(len(self.lines)), self.lines): l.grid(row=i, column=0) def release(self, ev): # restore the old border width try: rel, bd 
= self._oldrelief self.config(relief=rel, bd=bd) del self._oldrelief except: pass class TkConfigFrame(Tkinter.Frame): optFrame = None paramFrame = None commandFrame = None subCmdFrame = None previousCommand = None optFlags = None paramLines = None def __init__(self, master, options): Tkinter.Frame.__init__(self, master) self.options = options self.setupOptFlags() self.setupOptParameters() self.setupSubCommands() self.setupExtra() def getOptFlags(self): return self.optFlags def getOptParameters(self): r = [] for p in self.paramLines: r.append((p.flag, p.entry.get())) return r def updateConfig(self, options): for (opt, var) in self.getOptFlags(): var = var.get() if not var: continue # XXX - this is poor - add a '-' button to remove options f = getattr(options, 'opt_' + opt, None) if f: f() else: options[opt] = var for (opt, var) in self.getOptParameters(): if not var: continue # XXX - this is poor - add a '-' button to remove options f = getattr(options, 'opt_' + opt, None) if f: f(var) else: options[opt] = var return self.extra.get() def setupOptFlags(self): self.optFlags = [] flags = [] if hasattr(self.options, 'optFlags'): flags.extend(self.options.optFlags) d = {} soFar = {} for meth in reflect.prefixedMethodNames(self.options.__class__, 'opt_'): full = 'opt_' + meth func = getattr(self.options, full) if not usage.flagFunction(func) or meth in ('help', 'version'): continue if soFar.has_key(func): continue soFar[func] = 1 existing = d.setdefault(func, meth) if existing != meth: if len(existing) < len(meth): d[func] = meth for (func, name) in d.items(): flags.append((name, None, func.__doc__)) if len(flags): self.optFrame = f = Tkinter.Frame(self) for (flag, _, desc) in flags: b = Tkinter.BooleanVar() c = Tkinter.Checkbutton(f, text=desc, variable=b, wraplen=200) c.pack(anchor=Tkinter.W) self.optFlags.append((flag, b)) f.grid(row=1, column=1) def setupOptParameters(self): params = [] if hasattr(self.options, 'optParameters'): 
params.extend(self.options.optParameters) d = {} soFar = {} for meth in reflect.prefixedMethodNames(self.options.__class__, 'opt_'): full = 'opt_' + meth func = getattr(self.options, full) if usage.flagFunction(func) or soFar.has_key(func): continue soFar[func] = 1 existing = d.setdefault(func, meth) if existing != meth: if len(existing) < len(meth): d[func] = meth for (func, name) in d.items(): params.append((name, None, None, func.__doc__)) if len(params): self.paramFrame = Tkinter.Frame(self) self.paramLines = [] for (flag, _, default, desc) in params: try: default = self.options[flag] except KeyError: pass self.makeField(flag, default, desc) self.paramFrame.grid(row=1, column=2) def makeField(self, flag, default, desc): line = ParameterLine( self.paramFrame, self.paramLines, flag, desc, default, cmd=self.makeField ) self.paramLines.append(line) line.grid(row=len(self.paramLines), column=0) def setupSubCommands(self): self.optMap = {} if hasattr(self.options, 'subCommands'): self.commandFrame = f = Tkinter.Frame(self) self.cmdList = Tkinter.Listbox(f) for (cmd, _, opt, desc) in self.options.subCommands: self.cmdList.insert(Tkinter.END, cmd) self.optMap[cmd] = opt() self.cmdList.pack() self.subCmdPoll = reactor.callLater(0.1, self.pollSubCommands) f.grid(row=1, column=3) def setupExtra(self): f = Tkinter.Frame(self) l = Tkinter.Label(f, text='Extra Options') self.extra = Tkinter.Entry(f, background='white') l.pack() self.extra.pack(fill='y') f.grid(row=2, column=1, columnspan=2) def pollSubCommands(self): s = self.cmdList.curselection() if len(s): s = s[0] if s != self.previousCommand: if self.subOptFrame: self.subOptFrame.forget() self.subOptFrame.destroy() self.subOptFrame = TkConfigFrame(self.commandFrame, self.optMap[s]) self.subOptFrame.pack() self.subCmdPoll = reactor.callLater(0.1, self.pollSubCommands) class TkAppMenu(Tkinter.Menu): def __init__(self, master, create, callback, items): Tkinter.Menu.__init__(self, master) cmdMenu = Tkinter.Menu(self) 
self.add_cascade(label="Actions", menu=cmdMenu) cmdMenu.add_command(label='Create', command=create) cmdMenu.add_separator() cmdMenu.add_command(label='Quit', command=reactor.crash) tapMenu = Tkinter.Menu(self) self.add_cascade(label="Applications", menu=tapMenu) for item in items: tapMenu.add_command( label=item, command=lambda i=item, c = callback: c(i) ) def run(): taps = mktap.loadPlugins() r = Tkinter.Tk() r.withdraw() keyList = taps.keys() keyList.sort() config = TkMkAppFrame(r, None) menu = TkAppMenu( r, config.createApplication, lambda i, d = taps, c = config: c.reset(d[i]), keyList ) config.pack() r['menu'] = menu r.title('Twisted Application Maker ' + version) r.deiconify() tksupport.install(r) reactor.run() if __name__ == '__main__': run()
mistercrunch/airflow
refs/heads/master
tests/test_utils/salesforce_system_helpers.py
10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
from contextlib import contextmanager

from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.utils.process_utils import patch_environ

# Credential keys that must all be present in the JSON key file.
CONFIG_REQUIRED_FIELDS = ["host", "login", "password", "security_token"]
# Connection id and type are overridable through the environment.
SALESFORCE_CONNECTION_ID = os.environ.get('SALESFORCE_CONNECTION_ID', 'salesforce_default')
CONNECTION_TYPE = os.environ.get('CONNECTION_TYPE', 'http')


@contextmanager
def provide_salesforce_connection(key_file_path: str):
    """
    Context manager that provides a temporary value of SALESFORCE_DEFAULT connection.

    :param key_file_path: Path to file with SALESFORCE credentials .json file.
    :type key_file_path: str
    """
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path) as credentials:
        creds = json.load(credentials)
    # dict.keys() is set-like; the explicit set() makes the difference obvious.
    missing_keys = set(CONFIG_REQUIRED_FIELDS) - creds.keys()
    if missing_keys:
        message = f"{missing_keys} fields are missing"
        raise AirflowException(message)
    conn = Connection(
        conn_id=SALESFORCE_CONNECTION_ID,
        conn_type=CONNECTION_TYPE,
        host=creds["host"],
        login=creds["login"],
        password=creds["password"],
        extra=json.dumps({"security_token": creds["security_token"]}),
    )
    env_key = f"AIRFLOW_CONN_{conn.conn_id.upper()}"
    with patch_environ({env_key: conn.get_uri()}):
        yield
delinhabit/django
refs/heads/master
tests/template_tests/urls.py
153
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.conf.urls import include, url from . import views ns_patterns = [ # Test urls for testing reverse lookups url(r'^$', views.index), url(r'^client/([0-9,]+)/$', views.client), url(r'^client/(?P<id>[0-9]+)/(?P<action>[^/]+)/$', views.client_action), url(r'^client/(?P<client_id>[0-9]+)/(?P<action>[^/]+)/$', views.client_action), url(r'^named-client/([0-9]+)/$', views.client2, name="named.client"), ] urlpatterns = ns_patterns + [ # Unicode strings are permitted everywhere. url(r'^Юникод/(\w+)/$', views.client2, name="метка_оператора"), url(r'^Юникод/(?P<tag>\S+)/$', views.client2, name="метка_оператора_2"), # Test urls for namespaces and current_app url(r'ns1/', include((ns_patterns, 'app'), 'ns1')), url(r'ns2/', include((ns_patterns, 'app'))), ]
wdv4758h/ZipPy
refs/heads/master
lib-python/3/lib2to3/fixes/fix_standarderror.py
203
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for StandardError -> Exception."""

# Local imports
from .. import fixer_base
from ..fixer_util import Name


class FixStandarderror(fixer_base.BaseFix):
    """Rewrite the Python 2 ``StandardError`` builtin as ``Exception``."""

    # Opt in to lib2to3's bottom-matcher pattern optimisation.
    BM_compatible = True
    PATTERN = """ 'StandardError' """

    def transform(self, node, results):
        # Swap in the modern name, keeping the node's leading whitespace
        # so surrounding formatting is untouched.
        replacement = Name("Exception", prefix=node.prefix)
        return replacement
djfye/RocketMap
refs/heads/develop
pogom/__init__.py
186
#!/usr/bin/python # -*- coding: utf-8 -*-
willingc/oh-mainline
refs/heads/master
vendor/packages/Django/tests/regressiontests/i18n/other/__init__.py
12133432
chriha/GistTerminal
refs/heads/master
libs/__init__.py
12133432
adamwiggins/cocos2d
refs/heads/master
samples/demo_transitions.py
4
# # cocos2d # http://cocos2d.org # import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) from cocos.director import director from cocos.layer import Layer, ColorLayer from cocos.scene import Scene from cocos.scenes.transitions import * from cocos.actions import * from cocos.sprite import Sprite import pyglet from pyglet import gl, font from pyglet.window import key class ControlLayer(Layer): is_event_handler = True #: enable pyglet's events def __init__( self ): super(ControlLayer, self).__init__() self.text_title = pyglet.text.Label("Transition Demos", font_size=32, x=5, y=director.get_window_size()[1], anchor_x=font.Text.LEFT, anchor_y=font.Text.TOP ) self.text_subtitle = pyglet.text.Label( transition_list[current_transition].__name__, font_size=18, multiline=True, width=600, x=5, y=director.get_window_size()[1] - 80, anchor_x=font.Text.LEFT, anchor_y=font.Text.TOP ) self.text_help = pyglet.text.Label("Press LEFT / RIGHT for prev/next test, ENTER to restart test", font_size=16, x=director.get_window_size()[0] /2, y=20, anchor_x=font.Text.CENTER, anchor_y=font.Text.CENTER) def draw( self ): self.text_title.draw() self.text_subtitle.draw() self.text_help.draw() def on_key_press( self, k , m ): global current_transition, control_p if k == key.LEFT: current_transition = (current_transition-1)%len(transition_list) elif k == key.RIGHT: current_transition = (current_transition+1)%len(transition_list) elif k == key.ENTER: director.replace( transition_list[current_transition]( (control_list[(control_p+1)%len(control_list)] ), 1.25) ) control_p = (control_p + 1) % len(control_list) return True if k in (key.LEFT, key.RIGHT ): self.text_subtitle.text = transition_list[current_transition].__name__ class GrossiniLayer(Layer): def __init__( self ): super( GrossiniLayer, self ).__init__() g = Sprite( 'grossini.png') g.position = (320,240) rot = RotateBy( 360, 4 ) g.do( Repeat( rot + Reverse(rot) ) ) self.add( g ) class GrossiniLayer2(Layer): def 
__init__( self ): super( GrossiniLayer2, self ).__init__() rot = Rotate( 360, 5 ) g1 = Sprite( 'grossinis_sister1.png' ) g1.position = (490,240) g2 = Sprite( 'grossinis_sister2.png' ) g2.position = (140,240) g1.do( Repeat( rot + Reverse(rot) ) ) g2.do( Repeat( rot + Reverse(rot) ) ) self.add( g1 ) self.add( g2 ) if __name__ == "__main__": director.init(resizable=True) # director.window.set_fullscreen(True) transition_list = [ # ActionTransitions RotoZoomTransition, JumpZoomTransition, SplitColsTransition, SplitRowsTransition, MoveInLTransition, MoveInRTransition, MoveInBTransition, MoveInTTransition, SlideInLTransition, SlideInRTransition, SlideInBTransition, SlideInTTransition, FlipX3DTransition, FlipY3DTransition, FlipAngular3DTransition, ShuffleTransition, ShrinkGrowTransition, CornerMoveTransition, EnvelopeTransition, FadeTRTransition, FadeBLTransition, FadeUpTransition, FadeDownTransition, TurnOffTilesTransition, FadeTransition, ZoomTransition, ] current_transition = 0 g = GrossiniLayer() g2 = GrossiniLayer2() c2 = ColorLayer(128,16,16,255) c1 = ColorLayer(0,255,255,255) control1 = ControlLayer() control2 = ControlLayer() controlScene1 = Scene( c2, g, control2 ) controlScene2 = Scene( c1, g2, control2 ) control_p = 0 control_list = [controlScene1, controlScene2] director.run( controlScene1 )
batiste/django-page-cms
refs/heads/master
pages/testproj/documents/views.py
1
from django.shortcuts import render

from pages.testproj.documents.models import Document


def document_view(request, *args, **kwargs):
    """Render the example page, exposing documents attached to the current page.

    Any keyword arguments supplied by the URL resolver become the template
    context; ``documents``/``document`` are added when the corresponding
    keys are present.
    """
    context = kwargs
    current_page = kwargs.get('current_page', False)
    if current_page:
        context['documents'] = Document.objects.filter(page=current_page)
    if 'document_id' in kwargs:
        context['document'] = Document.objects.get(pk=int(kwargs['document_id']))
    context['in_document_view'] = True
    return render(request, 'pages/examples/index.html', context)
RX14/autokey
refs/heads/master
src/lib/common.py
47
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Shared constants and the D-Bus service object used by all autokey
# front-ends.

import os.path, dbus.service

# Per-user configuration directory and the runtime files kept inside it.
CONFIG_DIR = os.path.expanduser("~/.config/autokey")
LOCK_FILE = CONFIG_DIR + "/autokey.pid"
LOG_FILE = CONFIG_DIR + "/autokey.log"
# Log rotation limits: size per file and number of rotated files kept.
MAX_LOG_SIZE = 5 * 1024 * 1024 # 5 megabytes
MAX_LOG_COUNT = 3
LOG_FORMAT = "%(asctime)s %(levelname)s - %(name)s - %(message)s"

# Application identity and external URLs shown in the UI.
APP_NAME = "autokey"
CATALOG = ""
VERSION = "0.90.4"
HOMEPAGE = "http://autokey.googlecode.com/"
BUG_EMAIL = "cdekter@gmail.com"
FAQ_URL = "http://code.google.com/p/autokey/wiki/FAQ"
API_URL = "http://autokey.googlecode.com/svn/trunk/doc/scripting/index.html"
HELP_URL = "http://code.google.com/p/autokey/w/list"
DONATE_URL = "https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=L333CPRZ6J8JC"
BUG_URL = "http://code.google.com/p/autokey/issues/entry"

# Icon theme names for the main app and the various tray-status states.
ICON_FILE = "autokey"
ICON_FILE_NOTIFICATION = "autokey-status"
ICON_FILE_NOTIFICATION_DARK = "autokey-status-dark"
ICON_FILE_NOTIFICATION_ERROR = "autokey-status-error"

# Flag distinguishing the Qt front-end from the GTK one.
USING_QT = False


class AppService(dbus.service.Object):
    """D-Bus service exposing the running application on the session bus.

    Registered under the bus name ``org.autokey.Service`` at object path
    ``/AppService``; each exported method forwards to the wrapped ``app``.
    """

    def __init__(self, app):
        # Claim the well-known bus name on the user's session bus.
        busName = dbus.service.BusName('org.autokey.Service', bus=dbus.SessionBus())
        dbus.service.Object.__init__(self, busName, "/AppService")
        self.app = app

    @dbus.service.method(dbus_interface='org.autokey.Service', in_signature='', out_signature='')
    def show_configure(self):
        """Bring up the configuration window."""
        self.app.show_configure()

    @dbus.service.method(dbus_interface='org.autokey.Service', in_signature='s', out_signature='')
    def run_script(self, name):
        """Run the script with the given name."""
        self.app.service.run_script(name)

    @dbus.service.method(dbus_interface='org.autokey.Service', in_signature='s', out_signature='')
    def run_phrase(self, name):
        """Expand the phrase with the given name."""
        self.app.service.run_phrase(name)

    @dbus.service.method(dbus_interface='org.autokey.Service', in_signature='s', out_signature='')
    def run_folder(self, name):
        """Show the popup menu for the folder with the given name."""
        self.app.service.run_folder(name)
jnbek/update_mirrors
refs/heads/master
update_mirrors.py
1
#!/usr/bin/env python
"""Mirror a set of upstream software repositories with rsync.

Each entry in ``mirrors`` maps a destination sub-directory (relative to
``base_path``) to the rsync arguments and source URL used to sync it.
Syncs run in parallel (up to ``max_thread`` worker processes), and a lock
file prevents two instances from running concurrently.
"""
import os
import shlex
import subprocess
import sys
import multiprocessing

mirrors = {
    'pacbsd' : {
        'args' : '-azzrpP --delete',
        'url' : 'rsync.pacbsd.org::Repository',
    },
    'cpan' : {
        'args' : '-arpP --delete',
        'url' : 'cpan-rsync.perl.org::CPAN',
    },
    'gnu' : {
        'args' : '-rltpHS --progress --delete-excluded',
        'url' : 'rsync://mirrors.ocf.berkeley.edu/gnu/',
    },
    'nongnu' : {
        'args' : '-rltpHS --progress --delete-excluded',
        'url' : 'rsync://dl.sv.gnu.org/releases/',
    },
    'opencsw' : {
        'args' : '-aH --progress --delete',
        'url' : 'rsync://rsync.opencsw.org/opencsw/',
    },
    'ietf/internet-drafts' : {
        'args' : '-avzz --progress --delete',
        'url' : 'rsync.ietf.org::internet-drafts',
    },
    'ietf/rfc' : {
        'args' : '-avzz --progress',
        'url' : 'rsync.ietf.org::rfc',
    },
    'openindiana/dlc': {
        'args' : '-av --delete --progress',
        'url' : 'dlc-origin.openindiana.org::dlc',
    },
    'openindiana/pkg/dev' : {
        'args' : '-av --delete --progress',
        'url' : 'pkg-origin.openindiana.org::pkgdepot-dev',
    },
    'archhurd/repos' : {
        'args' : '-arpP --delete',
        'url' : 'rsync.archhurd.org::repos',
    },
    'archhurd/livecd' : {
        'args' : '-arpP --delete',
        'url' : 'rsync.archhurd.org::livecd',
    },
    'archhurd/abs' : {
        'args' : '-arpP --delete',
        'url' : 'rsync.archhurd.org::abs',
    },
}

# Maximum number of rsync processes to run at once.
max_thread = 4
# Presence of this file means a sync is (or appears to be) in progress.
lock_file = '/tmp/update_mirrors.lock'
base_path = '/share/www/mirrors/'  # need trailing /


def which(program):
    """Return the full path of *program* found on $PATH, or None if absent."""
    for p in os.environ['PATH'].split(':'):
        fullpath = os.path.join(p, program)
        if os.path.exists(fullpath):
            return fullpath
    return None


def build_cmd():
    """Build one complete rsync command line (a string) per configured mirror."""
    cmd_list = []
    rsync_path = which("rsync")
    for path in mirrors:
        # base_path carries the trailing slash, so plain concatenation is correct.
        dest = base_path + path
        args = ' '.join([rsync_path, mirrors[path]['args'], mirrors[path]['url'], dest])
        cmd_list.append(args)
    return cmd_list


def rsync(cmd):
    """Run a single rsync command; executed in a worker process by Pool.map."""
    pid = os.getpid()
    print("Starting PID {0} {1}".format(pid, cmd))
    command = shlex.split(cmd)
    subprocess.call(command)
    print("Finishing PID {0} {1}".format(pid, cmd))


if __name__ == '__main__':
    if os.path.isfile(lock_file):
        print("Already running update_mirrors or stale lockfile found: Exiting")
        sys.exit(1)
    # Create the lock, and guarantee it is removed again even if a sync
    # raises, so a crash cannot leave a stale lock blocking future runs.
    with open(lock_file, 'w'):
        pass
    try:
        cmds = build_cmd()
        # Pool as a context manager terminates the workers cleanly on exit.
        with multiprocessing.Pool(max_thread) as pool:
            pool.map(rsync, cmds)
        print("Main thread completed")
    finally:
        os.remove(lock_file)
jdnz/qml-rg
refs/heads/master
Meeting 7/image_loader_and_encoder.py
2
import math
import os
from skimage import io
from skimage import transform as tf
from matplotlib import pyplot as plt
from skimage.transform import resize
import numpy as np

"""This code loads the images from the APS capture and encodes them with a
neural network in a lower dimensional vector. For 20 hidden layers and 1000
epochs the encoding is already quite good. (This can be seen by comparing the
original and the encoded-decoded image) To get to even lower dimensions, we
manually lower the dimension in the end.
"""

# ----------------------------------------------------------------------------
# Load Images


def load_images(folder):
    """Load every PNG in *folder* and derive a binary label per image.

    Images whose filename contains 'einstein' or 'curie' are labelled 1,
    everything else 0.  Returns (images, labels) as parallel lists.
    """
    images = []
    labels = []
    for file in os.listdir(folder):
        if file.endswith(".png"):
            images.append(io.imread(folder + file))
            if file.find("einstein") > -1 or file.find("curie") > -1:
                labels.append(1)
            else:
                labels.append(0)
    return images, labels


# ----------------------------------------------------------------------------
# Resize Images


def prep_datas(xset, xlabels):
    """Resize each image to 32x32 grayscale and flatten to a 1024-vector.

    Returns (X, Y) where X is a list of shape-(1024,) numpy arrays and Y
    is the label list passed in, unchanged.
    """
    X = list(xset)
    for i in range(len(X)):
        # reduce the size of the image from 100X100 to 32X32. Also flattens the
        # color levels
        X[i] = resize(X[i], (32, 32, 1))
    X = [np.reshape(x, (1024,)) for x in X]  # reshape list (1024,) is crucial
    Y = xlabels
    return X, Y


# ------------------------------------------------------------------------------
# Load and reshape Images with image_loader.py
# NOTE: everything from here down runs at import time and requires the
# images/ directory plus keras to be available.

training_set, training_labels = load_images("images/train/")
test_set, test_labels = load_images("images/test/")
resize_set, resize_labels = prep_datas(training_set, training_labels)
resize_test_set, resize_test_labels = prep_datas(test_set, test_labels)
# print(resize_set)

# ---------------------------------------------------------------
# very ugly way to bring vectors to the right shape for SVC fit()
a = []
for x in resize_set:
    a.append(x.tolist())
X = a

from keras.models import Model, Sequential
from keras.layers import Input, Dense
from keras import regularizers

# ----------------------------------------------------------------
# Nice code from Keras example
encoding_dim = 100  # size of the learned latent representation

input_img = Input(shape=(1024,))
# add a Dense layer with a L1 activity regularizer
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(1024, activation='sigmoid')(encoded)

# ----------------------------------------------------------------
# this model maps the input to the reconstruction
autoencoder = Model(input_img, decoded)

# ----------------------------------------------------------------
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)

# ----------------------------------------------------------------
# And the decoder
# create a placeholder for an encoded (n-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))

# -------------------------------------------------------------------
# This is now the real training
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X, X, epochs=1000, batch_size=10)
autoencoder.save("save_model.h5")  # save model

# --------------------------------------------------------------------
# To use the model in the end we need these two functions
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(X)
decoded_imgs = decoder.predict(encoded_imgs)

# ---------------------------------------------------------------------
# NOW compare resized (32x32) image with encoded image
# reshape vectors again to images
decoded_imgs = [np.reshape(x, (32, 32)) for x in decoded_imgs]
original_imgs = [np.reshape(x, (32, 32)) for x in X]  # reshape vectors again to images

# Show reconstruction (top) against the resized original (bottom),
# one figure per training image.
for i in range(len(X)):
    decoded = np.asarray(decoded_imgs[i])
    original = np.asarray(original_imgs[i])
    plt.subplot(2, 1, 1)
    plt.imshow(decoded.squeeze(), cmap='gray')
    plt.subplot(2, 1, 2)
    plt.imshow(original.squeeze(), cmap='gray')
    plt.show()

# ------------------------------------------------------------------------
# The vector encoded_imgs now is the data we need for the SVD!
# encoding_dim can be changed, depending on how many input dimensions we
# want for the SVD
# We can also reduce the dimensions again manually: each output value is
# the sum over one contiguous chunk of the 100-dim encoding.
outputdim = 4  # what final size do we want
length = len(encoded_imgs)  # how many pictures
output = []
for k in range(length):
    encoded = encoded_imgs[k]  # which picture do we want to compress?
    list = []
    for i in range(outputdim):
        list.append(
            sum([x for x in encoded[i:(i + int(encoding_dim / outputdim))]]))
    output.append(list)
print(output)
zahari/samba
refs/heads/master
selftest/tests/__init__.py
13
# __init__.py -- The tests for selftest
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA  02110-1301, USA.

"""Tests for selftest."""

from testtools import TestCase

import unittest


def test_suite():
    """Assemble the selftest unit-test suite.

    Collects the tests from every ``selftest.tests.test_<name>`` module
    into a single :class:`unittest.TestSuite`.
    """
    names = ['socket_wrapper', 'target', 'testlist', 'run', 'samba']
    suite = unittest.TestSuite()
    suite.addTests(
        unittest.TestLoader().loadTestsFromNames(
            ['selftest.tests.test_' + name for name in names]))
    return suite
TheWylieStCoyote/gnuradio
refs/heads/master
gnuradio-runtime/examples/mp-sched/synthetic.py
3
#!/usr/bin/env python
#
# Copyright 2008,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

from gnuradio import gr, eng_notation
from gnuradio import blocks, filter
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import os


class pipeline(gr.hier_block2):
    def __init__(self, nstages, ntaps=256):
        """
        Create a pipeline of nstages of filter.fir_filter_fff's connected in
        serial terminating in a blocks.null_sink.
        """
        gr.hier_block2.__init__(self, "pipeline",
                                gr.io_signature(1, 1, gr.sizeof_float),
                                gr.io_signature(0, 0, 0))
        # Boxcar taps: each filter averages over ntaps samples.
        taps = ntaps*[1.0 / ntaps]
        upstream = self
        for i in range(nstages):
            op = filter.fir_filter_fff(1, taps)
            self.connect(upstream, op)
            upstream = op
        # Discard the pipeline output; only the work done matters.
        self.connect(upstream, blocks.null_sink(gr.sizeof_float))


class top(gr.top_block):
    """Synthetic benchmark flowgraph: npipes parallel FIR pipelines fed
    from a single null source through a head block limiting the sample
    count."""

    def __init__(self):
        gr.top_block.__init__(self)

        default_nsamples = 10e6
        parser = ArgumentParser()
        parser.add_argument("-p", "--npipelines", type=intx, default=1,
                            metavar="NPIPES",
                            help="the number of pipelines to create (default=%(default)s)")
        parser.add_argument("-s", "--nstages", type=intx, default=1,
                            metavar="NSTAGES",
                            help="the number of stages in each pipeline (default=%(default)s)")
        parser.add_argument("-N", "--nsamples", type=eng_float,
                            default=default_nsamples,
                            help=("the number of samples to run through the graph (default=%s)" %
                                  (eng_notation.num_to_str(default_nsamples))))
        parser.add_argument("-m", "--machine-readable", action="store_true",
                            default=False,
                            help="enable machine readable output")

        args = parser.parse_args()

        self.npipes = args.npipelines
        self.nstages = args.nstages
        self.nsamples = args.nsamples
        self.machine_readable = args.machine_readable

        ntaps = 256

        # Something vaguely like floating point ops
        self.flop = 2 * ntaps * args.npipelines * args.nstages * args.nsamples

        src = blocks.null_source(gr.sizeof_float)
        head = blocks.head(gr.sizeof_float, int(args.nsamples))
        self.connect(src, head)

        # All pipelines share the same head block as their input.
        for n in range(args.npipelines):
            self.connect(head, pipeline(args.nstages, ntaps))


def time_it(tb):
    """Run the flowgraph and print wall/user/sys timing and pseudo-FLOP
    throughput, in either human- or machine-readable form."""
    start = os.times()
    tb.run()
    stop = os.times()
    # Element-wise difference of the two os.times() 5-tuples.
    delta = list(map((lambda a, b: a-b), stop, start))
    user, sys, childrens_user, childrens_sys, real = delta
    total_user = user + childrens_user
    total_sys = sys + childrens_sys
    if tb.machine_readable:
        print("%3d %3d %.3e %7.3f %7.3f %7.3f %7.3f %.6e %.3e" % (
            tb.npipes, tb.nstages, tb.nsamples, real, total_user, total_sys,
            (total_user+total_sys) / real, tb.flop, tb.flop / real))
    else:
        print("npipes %7d" % (tb.npipes,))
        print("nstages %7d" % (tb.nstages,))
        print("nsamples %s" % (eng_notation.num_to_str(tb.nsamples),))
        print("real %7.3f" % (real,))
        print("user %7.3f" % (total_user,))
        print("sys %7.3f" % (total_sys,))
        print("(user+sys)/real %7.3f" % ((total_user + total_sys) / real,))
        print("pseudo_flop %s" % (eng_notation.num_to_str(tb.flop),))
        print("pseudo_flop/real %s" % (eng_notation.num_to_str(tb.flop / real),))


if __name__ == "__main__":
    try:
        tb = top()
        time_it(tb)
    except KeyboardInterrupt:
        # Conventional 128+SIGINT-style exit code on Ctrl-C.
        raise SystemExit(128)
aldanopolis/android_kernel_motorola_msm8226
refs/heads/cm-14.1
tools/perf/scripts/python/net_dropmonitor.py
4235
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms: if (i['loc'] >= loc): return (i['name'], i['loc']-loc) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
SatrioDwiPrabowo/Android_Alexa_Kernel_Nicki
refs/heads/master
tools/perf/scripts/python/net_dropmonitor.py
4235
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms: if (i['loc'] >= loc): return (i['name'], i['loc']-loc) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
adrientetar/robofab
refs/heads/python3-ufo3
Scripts/RoboFabIntro/demo_GlyphMath.py
9
#FLM: Fun with GlyphMath # this example is meant to run with the RoboFab Demo Font # as the Current Font. So, if you're doing this in FontLab # import the Demo Font UFO first. from robofab.world import CurrentFont from random import random f = CurrentFont() condensedLight = f["a#condensed_light"] wideLight = f["a#wide_light"] wideBold = f["a#wide_bold"] diff = wideLight - condensedLight destination = f.newGlyph("a#deltaexperiment") destination.clear() x = wideBold + (condensedLight-wideLight)*random() destination.appendGlyph( x) destination.width = x.width destination.update() f.update()
jocke-l/blues
refs/heads/master
blues/postgres.py
2
"""
Postgres Blueprint
==================

**Fabric environment:**

.. code-block:: yaml

    blueprints:
      - blues.postgres

    settings:
      postgres:
        version: 9.3            # PostgreSQL version (required)
        schemas:
          some_schema_name:     # The schema name
            user: foo           # Username to connect to schema
            password: bar       # Password to connect to schema (optional)

"""
# NOTE: Python 2 / Fabric code; runs commands on remote hosts via refabric.
import os
from datetime import datetime

from fabric.contrib import files
from fabric.decorators import task
from fabric.operations import prompt
from fabric.state import env

from refabric.api import run, info
from refabric.context_managers import sudo, silent
from refabric.contrib import blueprints

from . import debian

__all__ = ['start', 'stop', 'restart', 'reload', 'setup', 'configure',
           'setup_schemas', 'generate_pgtune_conf', 'dump']


blueprint = blueprints.get(__name__)

# Service-control tasks delegated to the debian blueprint.
start = debian.service_task('postgresql', 'start')
stop = debian.service_task('postgresql', 'stop')
restart = debian.service_task('postgresql', 'restart')
reload = debian.service_task('postgresql', 'reload')

# Configured PostgreSQL version (defaults to 9.1) and a helper that joins
# paths under /etc/postgresql/<version>/main/.
version = lambda: blueprint.get('version', '9.1')
postgres_root = lambda *a: os.path.join('/etc/postgresql/{}/main/'.format(version()), *a)


def install():
    """Install the PostgreSQL server, dev headers, contrib and pgtune."""
    with sudo():
        debian.apt_get('install',
                       'postgresql',
                       'postgresql-server-dev-{}'.format(version()),
                       'libpq-dev',
                       'postgresql-contrib-{}'.format(version()),
                       'pgtune')


@task
def setup():
    """
    Install, configure Postgresql and create schemas
    """
    install()

    # Bump shared memory limits
    setup_shared_memory()

    # Generate pgtune.conf
    generate_pgtune_conf()

    # Upload templates
    configure()

    # Create schemas and related users
    setup_schemas()


@task
def configure():
    """
    Configure Postgresql
    """
    # Upload pgtune.conf and the version-specific postgresql.conf; restart
    # only if at least one of them actually changed.
    updates = [blueprint.upload(os.path.join('.', 'pgtune.conf'), postgres_root()),
               blueprint.upload(os.path.join('.', 'postgresql-{}.conf'.format(version())),
                                postgres_root('postgresql.conf'))]

    if any(updates):
        restart()


@task
def setup_schemas(drop=False):
    """
    Create database schemas and grant user privileges

    :param drop: Drop existing schemas before creation
    """
    schemas = blueprint.get('schemas', {})
    with sudo('postgres'):
        for schema, config in schemas.iteritems():
            user, password = config['user'], config.get('password')
            info('Creating user {}', user)
            if password:
                _client_exec("CREATE ROLE %(user)s WITH PASSWORD '%(password)s' LOGIN",
                             user=user, password=password)
            else:
                _client_exec("CREATE ROLE %(user)s LOGIN", user=user)
            if drop:
                info('Droping schema {}', schema)
                _client_exec('DROP DATABASE %(name)s', name=schema)

            info('Creating schema {}', schema)
            _client_exec('CREATE DATABASE %(name)s', name=schema)

            info('Granting user {} to schema {}'.format(user, schema))
            _client_exec("GRANT ALL PRIVILEGES ON DATABASE %(schema)s to %(user)s",
                         schema=schema, user=user)


def _client_exec(cmd, **kwargs):
    """Run a single SQL statement through psql as the postgres user.

    `schema` in kwargs selects the database to connect to (default
    template1); the remaining kwargs are %-interpolated into `cmd`.
    """
    with sudo('postgres'):
        schema = kwargs.get('schema', 'template1')
        return run("echo \"%s;\" | psql -d %s" % (cmd % kwargs, schema))


def setup_shared_memory():
    """
    http://leopard.in.ua/2013/09/05/postgresql-sessting-shared-memory/
    """
    # Set kernel.shmmax/shmall to half of physical memory, once, and apply
    # via sysctl -p.  Skipped if either key is already present.
    sysctl_path = '/etc/sysctl.conf'
    shmmax_configured = files.contains(sysctl_path, 'kernel.shmmax')
    shmall_configured = files.contains(sysctl_path, 'kernel.shmall')
    if not any([shmmax_configured, shmall_configured]):
        page_size = debian.page_size()
        phys_pages = debian.phys_pages()
        shmall = phys_pages / 2
        shmmax = shmall * page_size

        shmmax_str = 'kernel.shmmax = {}'.format(shmmax)
        shmall_str = 'kernel.shmall = {}'.format(shmall)

        with sudo():
            files.append(sysctl_path, shmmax_str, partial=True)
            files.append(sysctl_path, shmall_str, partial=True)
            run('sysctl -p')

        info('Added **{}** to {}', shmmax_str, sysctl_path)
        info('Added **{}** to {}', shmall_str, sysctl_path)


@task
def generate_pgtune_conf(role='db'):
    """
    Run pgtune and create pgtune.conf

    :param role: Which fabric role to place local pgtune.conf template under
    """
    conf_path = postgres_root('postgresql.conf')
    with sudo(), silent():
        output = run('pgtune -T Web -i {}'.format(conf_path)).strip()

        def parse(c):
            # Keep only the lines pgtune itself generated, strip trailing
            # comments, and yield (key, value) pairs.
            lines = [l for l in c.splitlines() if '# pgtune' in l]
            for line in lines:
                try:
                    comment = line.index('#')
                    line = line[:comment]
                except ValueError:
                    pass
                clean = lambda s: s.strip('\n\r\t\'" ')
                key, _, value = line.partition('=')
                key, value = map(clean, (key, value))
                if key:
                    yield key, value or None

    tune_conf = dict(parse(output))
    # Settings from the blueprint override pgtune's suggestions.
    tune_conf.update(blueprint.get('pgtune', {}))
    tune_conf = '\n'.join((' = '.join(item)) for item in tune_conf.iteritems())
    conf_dir = os.path.join(os.path.dirname(env['real_fabfile']), 'templates', role, 'postgres')
    conf_path = os.path.join(conf_dir, 'pgtune.conf')

    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)

    with open(conf_path, 'w+') as f:
        f.write(tune_conf)


@task
def dump(schema=None):
    """
    Dump and download all configured, or given, schemas.

    :param schema: Specific shema to dump and download.
    """
    if not schema:
        # Interactive selection when no schema was given on the CLI.
        schemas = blueprint.get('schemas', {}).keys()
        for i, schema in enumerate(schemas, start=1):
            print("{i}. {schema}".format(i=i, schema=schema))
        valid_indices = '[1-{}]+'.format(len(schemas))
        schema_choice = prompt('Select schema to dump:', default='1',
                               validate=valid_indices)
        schema = schemas[int(schema_choice)-1]

    with sudo('postgres'):
        now = datetime.now().strftime('%Y-%m-%d')
        output_file = '/tmp/{}_{}.backup'.format(schema, now)
        filename = os.path.basename(output_file)

        options = dict(
            format='tar',
            output_file=output_file,
            schema=schema
        )

        info('Dumping schema {}...', schema)
        run('pg_dump -c -F {format} -f {output_file} {schema}'.format(**options))

        info('Downloading dump...')
        local_file = '~/{}'.format(filename)
        files.get(output_file, local_file)

    with sudo(), silent():
        debian.rm(output_file)

    info('New smoking hot dump at {}', local_file)
brokenjacobs/ansible
refs/heads/devel
test/units/module_utils/basic/test_deprecate_warn.py
62
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import json
import sys

from ansible.compat.tests import unittest
from units.mock.procenv import swap_stdin_and_argv, swap_stdout

import ansible.module_utils.basic


class TestAnsibleModuleWarnDeprecate(unittest.TestCase):
    """Test the AnsibleModule Warn Method"""

    def test_warn(self):
        # warn() entries and exit_json(warnings=...) entries must both end
        # up in the module's JSON output, in call order.
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        with swap_stdin_and_argv(stdin_data=args):
            with swap_stdout():
                # Reset cached args so the module re-reads the swapped stdin.
                ansible.module_utils.basic._ANSIBLE_ARGS = None
                am = ansible.module_utils.basic.AnsibleModule(
                    argument_spec=dict(),
                )
                am._name = 'unittest'

                am.warn('warning1')

                with self.assertRaises(SystemExit):
                    am.exit_json(warnings=['warning2'])
                self.assertEquals(json.loads(sys.stdout.getvalue())['warnings'],
                                  ['warning1', 'warning2'])

    def test_deprecate(self):
        # deprecate() accepts an optional version; entries passed through
        # exit_json may be plain strings or (msg, version) tuples.
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        with swap_stdin_and_argv(stdin_data=args):
            with swap_stdout():
                ansible.module_utils.basic._ANSIBLE_ARGS = None
                am = ansible.module_utils.basic.AnsibleModule(
                    argument_spec=dict(),
                )
                am._name = 'unittest'

                am.deprecate('deprecation1')
                am.deprecate('deprecation2', '2.3')

                with self.assertRaises(SystemExit):
                    am.exit_json(deprecations=['deprecation3',
                                               ('deprecation4', '2.4')])

                output = json.loads(sys.stdout.getvalue())
                self.assertTrue('warnings' not in output or output['warnings'] == [])
                self.assertEquals(output['deprecations'], [
                    {u'msg': u'deprecation1', u'version': None},
                    {u'msg': u'deprecation2', u'version': '2.3'},
                    {u'msg': u'deprecation3', u'version': None},
                    {u'msg': u'deprecation4', u'version': '2.4'},
                ])

    def test_deprecate_without_list(self):
        # A bare string for deprecations= is normalized to one dict entry.
        args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
        with swap_stdin_and_argv(stdin_data=args):
            with swap_stdout():
                ansible.module_utils.basic._ANSIBLE_ARGS = None
                am = ansible.module_utils.basic.AnsibleModule(
                    argument_spec=dict(),
                )
                am._name = 'unittest'

                with self.assertRaises(SystemExit):
                    am.exit_json(deprecations='Simple deprecation warning')

                output = json.loads(sys.stdout.getvalue())
                self.assertTrue('warnings' not in output or output['warnings'] == [])
                self.assertEquals(output['deprecations'], [
                    {u'msg': u'Simple deprecation warning', u'version': None},
                ])
RHInception/re-core
refs/heads/master
test/test_steps.py
2
# Copyright (C) 2014 SEE AUTHORS FILE # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from . import TestCase, unittest import recore.mongo class TestSteps(TestCase): def test_new_Step_simple(self): step = "frob:Nicate" s = recore.mongo.Step(step, '123456789abcdefg') self.assertEqual(s.step_name, "frob:Nicate") self.assertEqual(s.command, 'frob') self.assertEqual(s.subcommand, 'Nicate') self.assertEqual(str(s), "frob:Nicate") def test_new_Step_parameters(self): step = { "frob:Nicate": { "megafrob": True } } s = recore.mongo.Step(step, '123456789abcdefg') self.assertEqual(s.step_name, "frob:Nicate") self.assertEqual(s.command, 'frob') self.assertEqual(s.subcommand, 'Nicate') self.assertEqual(str(s), "frob:Nicate")
joopert/home-assistant
refs/heads/dev
homeassistant/components/maxcube/climate.py
4
"""Support for MAX! Thermostats via MAX! Cube."""
import logging
import socket

from maxcube.device import (
    MAX_DEVICE_MODE_AUTOMATIC,
    MAX_DEVICE_MODE_MANUAL,
    MAX_DEVICE_MODE_VACATION,
    MAX_DEVICE_MODE_BOOST,
)

from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
    HVAC_MODE_AUTO,
    SUPPORT_TARGET_TEMPERATURE,
    SUPPORT_PRESET_MODE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS

from . import DATA_KEY

_LOGGER = logging.getLogger(__name__)

# MAX! operation modes exposed to Home Assistant as presets.
PRESET_MANUAL = "manual"
PRESET_BOOST = "boost"
PRESET_VACATION = "vacation"

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Iterate through all MAX! Devices and add thermostats."""
    devices = []
    for handler in hass.data[DATA_KEY].values():
        cube = handler.cube
        for device in cube.devices:
            # Entity name is "<room> <device>".
            name = "{} {}".format(cube.room_by_id(device.room_id).name, device.name)

            # Both radiator and wall thermostats become climate entities.
            if cube.is_thermostat(device) or cube.is_wallthermostat(device):
                devices.append(MaxCubeClimate(handler, name, device.rf_address))

    if devices:
        add_entities(devices)


class MaxCubeClimate(ClimateDevice):
    """MAX! Cube ClimateDevice."""

    def __init__(self, handler, name, rf_address):
        """Initialize MAX! Cube ClimateDevice."""
        self._name = name
        # Only AUTO is exposed as an HVAC mode; other MAX! modes are presets.
        self._operation_list = [HVAC_MODE_AUTO]
        self._rf_address = rf_address
        self._cubehandle = handler

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        return self.map_temperature_max_hass(device.min_temperature)

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        return self.map_temperature_max_hass(device.max_temperature)

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        # Map and return current temperature
        return self.map_temperature_max_hass(device.actual_temperature)

    @property
    def hvac_mode(self):
        """Return current operation (auto, manual, boost, vacation)."""
        # Always AUTO; the actual MAX! mode is surfaced via preset_mode.
        return HVAC_MODE_AUTO

    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return self._operation_list

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        return self.map_temperature_max_hass(device.target_temperature)

    def set_temperature(self, **kwargs):
        """Set new target temperatures.

        Returns False (without raising) when no temperature was supplied
        or the cube connection failed.
        """
        if kwargs.get(ATTR_TEMPERATURE) is None:
            return False

        target_temperature = kwargs.get(ATTR_TEMPERATURE)
        device = self._cubehandle.cube.device_by_rf(self._rf_address)

        cube = self._cubehandle.cube

        # Serialize cube access across entities.
        with self._cubehandle.mutex:
            try:
                cube.set_target_temperature(device, target_temperature)
            except (socket.timeout, socket.error):
                _LOGGER.error("Setting target temperature failed")
                return False

    @property
    def preset_mode(self):
        """Return the current preset mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        return self.map_mode_max_hass(device.mode)

    @property
    def preset_modes(self):
        """Return available preset modes."""
        return [PRESET_BOOST, PRESET_MANUAL, PRESET_VACATION]

    def set_preset_mode(self, preset_mode):
        """Set new operation mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        # Unknown presets fall back to automatic mode.
        mode = self.map_mode_hass_max(preset_mode) or MAX_DEVICE_MODE_AUTOMATIC

        with self._cubehandle.mutex:
            try:
                self._cubehandle.cube.set_mode(device, mode)
            except (socket.timeout, socket.error):
                _LOGGER.error("Setting operation mode failed")
                return False

    def update(self):
        """Get latest data from MAX! Cube."""
        self._cubehandle.update()

    @staticmethod
    def map_temperature_max_hass(temperature):
        """Map Temperature from MAX! to HASS (None becomes 0.0)."""
        if temperature is None:
            return 0.0

        return temperature

    @staticmethod
    def map_mode_hass_max(mode):
        """Map Home Assistant Operation Modes to MAX! Operation Modes.

        Returns None for unrecognized presets.
        """
        if mode == PRESET_MANUAL:
            mode = MAX_DEVICE_MODE_MANUAL
        elif mode == PRESET_VACATION:
            mode = MAX_DEVICE_MODE_VACATION
        elif mode == PRESET_BOOST:
            mode = MAX_DEVICE_MODE_BOOST
        else:
            mode = None

        return mode

    @staticmethod
    def map_mode_max_hass(mode):
        """Map MAX! Operation Modes to Home Assistant Operation Modes.

        Returns None for MAX_DEVICE_MODE_AUTOMATIC and unknown values.
        """
        if mode == MAX_DEVICE_MODE_MANUAL:
            operation_mode = PRESET_MANUAL
        elif mode == MAX_DEVICE_MODE_VACATION:
            operation_mode = PRESET_VACATION
        elif mode == MAX_DEVICE_MODE_BOOST:
            operation_mode = PRESET_BOOST
        else:
            operation_mode = None

        return operation_mode
samapriya/External.Asset-EE.Pipeline
refs/heads/master
gee_asset_manager/batch_remover.py
3
import fnmatch
import logging
import sys

import ee


def delete(asset_path):
    """Delete every Earth Engine asset matching *asset_path*.

    asset_path may contain fnmatch-style wildcards; matching is done
    against the sibling assets under its parent folder.  Exits the
    process with status 1 when nothing matches.
    """
    root = asset_path[:asset_path.rfind('/')]
    all_assets_names = [e['id'] for e in ee.data.getList({'id': root})]
    filtered_names = fnmatch.filter(all_assets_names, asset_path)
    if not filtered_names:
        logging.warning('Nothing to remove. Exiting.')
        sys.exit(1)
    else:
        for path in filtered_names:
            __delete_recursive(path)
            logging.info('Collection %s removed', path)


def __delete_recursive(asset_path):
    """Recursively delete a single asset (folder, collection, ...).

    Folders recurse back into delete(); other container types have their
    children deleted, then the container itself.
    """
    info = ee.data.getInfo(asset_path)
    if not info:
        logging.warning('Nothing to delete.')
        sys.exit(1)
    elif info['type'] == 'Image':
        # NOTE(review): single images are skipped, not deleted — confirm
        # whether this is intentional or a missing deleteAsset call.
        pass
    elif info['type'] == 'Folder':
        items_in_destination = ee.data.getList({'id': asset_path})
        for item in items_in_destination:
            logging.info('Removing items in %s folder', item['id'])
            delete(item['id'])
    else:
        # Image collections and other containers: delete children first,
        # then the container itself.
        items_in_destination = ee.data.getList({'id': asset_path})
        for item in items_in_destination:
            ee.data.deleteAsset(item['id'])
        ee.data.deleteAsset(asset_path)
yakky/django
refs/heads/master
django/conf/locale/da/__init__.py
12133432
reyrodrigues/EU-SMS
refs/heads/master
temba/msgs/migrations/0027_create_outgoing_indexes.py
3
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations # language=SQL CREATE_SQL = """ DROP INDEX IF EXISTS msgs_msg_org_failed_created_on; DROP INDEX IF EXISTS msgs_msg_sent_label; DROP INDEX IF EXISTS msgs_msg_outbox_label; DROP INDEX IF EXISTS msgs_msg_failed_label; CREATE INDEX msgs_msg_outbox_label ON msgs_msg(org_id, created_on DESC) WHERE direction = 'O' AND visibility = 'V' AND status IN ('P', 'Q'); CREATE INDEX msgs_msg_sent_label ON msgs_msg(org_id, created_on DESC) WHERE direction = 'O' AND visibility = 'V' AND status IN ('W', 'S', 'D'); CREATE INDEX msgs_msg_failed_label ON msgs_msg(org_id, created_on DESC) WHERE direction = 'O' AND visibility = 'V' AND status = 'F'; """ # language=SQL DROP_SQL = """ DROP INDEX msgs_msg_sent_label; DROP INDEX msgs_msg_outbox_label; DROP INDEX msgs_msg_failed_label; CREATE INDEX msgs_msg_org_failed_created_on ON msgs_msg(org_id, direction, visibility, created_on DESC) WHERE status = 'F'; """ class Migration(migrations.Migration): dependencies = [ ('msgs', '0026_system_label_triggers'), ] operations = [ migrations.RunSQL(CREATE_SQL, DROP_SQL) ]
krischer/python-future
refs/heads/master
src/future/backports/email/mime/audio.py
82
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Anthony Baxter
# Contact: email-sig@python.org

"""Class representing audio/* type MIME documents."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import

__all__ = ['MIMEAudio']

# NOTE(review): sndhdr was removed from the stdlib in Python 3.13; this
# backport targets older interpreters.
import sndhdr

from io import BytesIO
from future.backports.email import encoders
from future.backports.email.mime.nonmultipart import MIMENonMultipart


# sndhdr format name -> MIME audio subtype.
_sndhdr_MIMEmap = {'au'  : 'basic',
                   'wav' :'x-wav',
                   'aiff':'x-aiff',
                   'aifc':'x-aiff',
                   }

# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
    """Try to identify a sound file type.

    sndhdr.what() has a pretty cruddy interface, unfortunately.  This is why
    we re-do it here.  It would be easier to reverse engineer the Unix 'file'
    command and use the standard 'magic' file, as shipped with a modern Unix.

    Returns the MIME subtype string, or None if the data is unrecognized.
    """
    # 512 bytes is enough header for every test in sndhdr.tests.
    hdr = data[:512]
    fakefile = BytesIO(hdr)
    for testfn in sndhdr.tests:
        res = testfn(hdr, fakefile)
        if res is not None:
            return _sndhdr_MIMEmap.get(res[0])
    return None


class MIMEAudio(MIMENonMultipart):
    """Class for generating audio/* MIME documents."""

    def __init__(self, _audiodata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an audio/* type MIME document.

        _audiodata is a string containing the raw audio data.  If this data
        can be decoded by the standard Python `sndhdr' module, then the
        subtype will be automatically included in the Content-Type header.
        Otherwise, you can specify  the specific audio subtype via the
        _subtype parameter.  If _subtype is not given, and no subtype can be
        guessed, a TypeError is raised.

        _encoder is a function which will perform the actual encoding for
        transport of the image data.  It takes one argument, which is this
        Image instance.  It should use get_payload() and set_payload() to
        change the payload to the encoded form.  It should also add any
        Content-Transfer-Encoding or other headers to the message as
        necessary.  The default encoding is Base64.

        Any additional keyword arguments are passed to the base class
        constructor, which turns them into parameters on the Content-Type
        header.
        """
        if _subtype is None:
            _subtype = _whatsnd(_audiodata)
        if _subtype is None:
            raise TypeError('Could not find audio MIME subtype')
        MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
        self.set_payload(_audiodata)
        _encoder(self)
buckets1337/MotherMUD
refs/heads/master
passlib/utils/pbkdf2.py
23
"""passlib.pbkdf2 - PBKDF2 support this module is getting increasingly poorly named. maybe rename to "kdf" since it's getting more key derivation functions added. """ #============================================================================= # imports #============================================================================= # core import hashlib import logging; log = logging.getLogger(__name__) import re from struct import pack from warnings import warn # site try: from M2Crypto import EVP as _EVP except ImportError: _EVP = None # pkg from passlib.exc import PasslibRuntimeWarning, ExpectedTypeError from passlib.utils import join_bytes, to_native_str, bytes_to_int, int_to_bytes, join_byte_values from passlib.utils.compat import b, bytes, BytesIO, irange, callable, int_types # local __all__ = [ "get_prf", "pbkdf1", "pbkdf2", ] #============================================================================= # hash helpers #============================================================================= # known hash names _nhn_formats = dict(hashlib=0, iana=1) _nhn_hash_names = [ # (hashlib/ssl name, iana name or standin, ... other known aliases) # hashes with official IANA-assigned names # (as of 2012-03 - http://www.iana.org/assignments/hash-function-text-names) ("md2", "md2"), ("md5", "md5"), ("sha1", "sha-1"), ("sha224", "sha-224", "sha2-224"), ("sha256", "sha-256", "sha2-256"), ("sha384", "sha-384", "sha2-384"), ("sha512", "sha-512", "sha2-512"), # hashlib/ssl-supported hashes without official IANA names, # hopefully compatible stand-ins have been chosen. ("md4", "md4"), ("sha", "sha-0", "sha0"), ("ripemd", "ripemd"), ("ripemd160", "ripemd-160"), ] # cache for norm_hash_name() _nhn_cache = {} def norm_hash_name(name, format="hashlib"): """Normalize hash function name :arg name: Original hash function name. This name can be a Python :mod:`~hashlib` digest name, a SCRAM mechanism name, IANA assigned hash name, etc. 
Case is ignored, and underscores are converted to hyphens. :param format: Naming convention to normalize to. Possible values are: * ``"hashlib"`` (the default) - normalizes name to be compatible with Python's :mod:`!hashlib`. * ``"iana"`` - normalizes name to IANA-assigned hash function name. for hashes which IANA hasn't assigned a name for, issues a warning, and then uses a heuristic to give a "best guess". :returns: Hash name, returned as native :class:`!str`. """ # check cache try: idx = _nhn_formats[format] except KeyError: raise ValueError("unknown format: %r" % (format,)) try: return _nhn_cache[name][idx] except KeyError: pass orig = name # normalize input if not isinstance(name, str): name = to_native_str(name, 'utf-8', 'hash name') name = re.sub("[_ /]", "-", name.strip().lower()) if name.startswith("scram-"): name = name[6:] if name.endswith("-plus"): name = name[:-5] # look through standard names and known aliases def check_table(name): for row in _nhn_hash_names: if name in row: _nhn_cache[orig] = row return row[idx] result = check_table(name) if result: return result # try to clean name up, and recheck table m = re.match("^(?P<name>[a-z]+)-?(?P<rev>\d)?-?(?P<size>\d{3,4})?$", name) if m: name, rev, size = m.group("name", "rev", "size") if rev: name += rev if size: name += "-" + size result = check_table(name) if result: return result # else we've done what we can warn("norm_hash_name(): unknown hash: %r" % (orig,), PasslibRuntimeWarning) name2 = name.replace("-", "") row = _nhn_cache[orig] = (name2, name) return row[idx] # TODO: get_hash() func which wraps norm_hash_name(), hashlib.<attr>, and hashlib.new #============================================================================= # general prf lookup #============================================================================= _BNULL = b('\x00') _XY_DIGEST = b(',\x1cb\xe0H\xa5\x82M\xfb>\xd6\x98\xef\x8e\xf9oQ\x85\xa3i') _trans_5C = join_byte_values((x ^ 0x5C) for x in irange(256)) _trans_36 = 
join_byte_values((x ^ 0x36) for x in irange(256)) def _get_hmac_prf(digest): "helper to return HMAC prf for specific digest" def tag_wrapper(prf): prf.__name__ = "hmac_" + digest prf.__doc__ = ("hmac_%s(key, msg) -> digest;" " generated by passlib.utils.pbkdf2.get_prf()" % digest) if _EVP and digest == "sha1": # use m2crypto function directly for sha1, since that's it's default digest try: result = _EVP.hmac(b('x'),b('y')) except ValueError: # pragma: no cover pass else: if result == _XY_DIGEST: return _EVP.hmac, 20 # don't expect to ever get here, but will fall back to pure-python if we do. warn("M2Crypto.EVP.HMAC() returned unexpected result " # pragma: no cover -- sanity check "during Passlib self-test!", PasslibRuntimeWarning) elif _EVP: # use m2crypto if it's present and supports requested digest try: result = _EVP.hmac(b('x'), b('y'), digest) except ValueError: pass else: # it does. so use M2Crypto's hmac & digest code hmac_const = _EVP.hmac def prf(key, msg): return hmac_const(key, msg, digest) digest_size = len(result) tag_wrapper(prf) return prf, digest_size # fall back to hashlib-based implementation digest_const = getattr(hashlib, digest, None) if not digest_const: raise ValueError("unknown hash algorithm: %r" % (digest,)) tmp = digest_const() block_size = tmp.block_size assert block_size >= 16, "unacceptably low block size" digest_size = tmp.digest_size del tmp def prf(key, msg): # simplified version of stdlib's hmac module if len(key) > block_size: key = digest_const(key).digest() key += _BNULL * (block_size - len(key)) tmp = digest_const(key.translate(_trans_36) + msg).digest() return digest_const(key.translate(_trans_5C) + tmp).digest() tag_wrapper(prf) return prf, digest_size # cache mapping prf name/func -> (func, digest_size) _prf_cache = {} def _clear_prf_cache(): "helper for unit tests" _prf_cache.clear() def get_prf(name): """lookup pseudo-random family (prf) by name. :arg name: this must be the name of a recognized prf. 
currently this only recognizes names with the format :samp:`hmac-{digest}`, where :samp:`{digest}` is the name of a hash function such as ``md5``, ``sha256``, etc. this can also be a callable with the signature ``prf(secret, message) -> digest``, in which case it will be returned unchanged. :raises ValueError: if the name is not known :raises TypeError: if the name is not a callable or string :returns: a tuple of :samp:`({func}, {digest_size})`. * :samp:`{func}` is a function implementing the specified prf, and has the signature ``func(secret, message) -> digest``. * :samp:`{digest_size}` is an integer indicating the number of bytes the function returns. usage example:: >>> from passlib.utils.pbkdf2 import get_prf >>> hmac_sha256, dsize = get_prf("hmac-sha256") >>> hmac_sha256 <function hmac_sha256 at 0x1e37c80> >>> dsize 32 >>> digest = hmac_sha256('password', 'message') this function will attempt to return the fastest implementation it can find; if M2Crypto is present, and supports the specified prf, :func:`M2Crypto.EVP.hmac` will be used behind the scenes. 
""" global _prf_cache if name in _prf_cache: return _prf_cache[name] if isinstance(name, str): if name.startswith("hmac-") or name.startswith("hmac_"): retval = _get_hmac_prf(name[5:]) else: raise ValueError("unknown prf algorithm: %r" % (name,)) elif callable(name): # assume it's a callable, use it directly digest_size = len(name(b('x'),b('y'))) retval = (name, digest_size) else: raise ExpectedTypeError(name, "str or callable", "prf name") _prf_cache[name] = retval return retval #============================================================================= # pbkdf1 support #============================================================================= def pbkdf1(secret, salt, rounds, keylen=None, hash="sha1"): """pkcs#5 password-based key derivation v1.5 :arg secret: passphrase to use to generate key :arg salt: salt string to use when generating key :param rounds: number of rounds to use to generate key :arg keylen: number of bytes to generate (if ``None``, uses digest's native size) :param hash: hash function to use. must be name of a hash recognized by hashlib. :returns: raw bytes of generated key .. note:: This algorithm has been deprecated, new code should use PBKDF2. Among other limitations, ``keylen`` cannot be larger than the digest size of the specified hash. """ # validate secret & salt if not isinstance(secret, bytes): raise ExpectedTypeError(secret, "bytes", "secret") if not isinstance(salt, bytes): raise ExpectedTypeError(salt, "bytes", "salt") # validate rounds if not isinstance(rounds, int_types): raise ExpectedTypeError(rounds, "int", "rounds") if rounds < 1: raise ValueError("rounds must be at least 1") # resolve hash try: hash_const = getattr(hashlib, hash) except AttributeError: # check for ssl hash # NOTE: if hash unknown, new() will throw ValueError, which we'd just # reraise anyways; so instead of checking, we just let it get # thrown during first use, below # TODO: use builtin md4 class if hashlib doesn't have it. 
def hash_const(msg): return hashlib.new(hash, msg) # prime pbkdf1 loop, get block size block = hash_const(secret + salt).digest() # validate keylen if keylen is None: keylen = len(block) elif not isinstance(keylen, int_types): raise ExpectedTypeError(keylen, "int or None", "keylen") elif keylen < 0: raise ValueError("keylen must be at least 0") elif keylen > len(block): raise ValueError("keylength too large for digest: %r > %r" % (keylen, len(block))) # main pbkdf1 loop for _ in irange(rounds-1): block = hash_const(block).digest() return block[:keylen] #============================================================================= # pbkdf2 #============================================================================= MAX_BLOCKS = 0xffffffff # 2**32-1 MAX_HMAC_SHA1_KEYLEN = MAX_BLOCKS*20 # NOTE: the pbkdf2 spec does not specify a maximum number of rounds. # however, many of the hashes in passlib are currently clamped # at the 32-bit limit, just for sanity. once realistic pbkdf2 rounds # start approaching 24 bits, this limit will be raised. def pbkdf2(secret, salt, rounds, keylen=None, prf="hmac-sha1"): """pkcs#5 password-based key derivation v2.0 :arg secret: passphrase to use to generate key :arg salt: salt string to use when generating key :param rounds: number of rounds to use to generate key :arg keylen: number of bytes to generate. if set to ``None``, will use digest size of selected prf. :param prf: psuedo-random family to use for key strengthening. this can be any string or callable accepted by :func:`get_prf`. 
this defaults to ``"hmac-sha1"`` (the only prf explicitly listed in the PBKDF2 specification) :returns: raw bytes of generated key """ # validate secret & salt if not isinstance(secret, bytes): raise ExpectedTypeError(secret, "bytes", "secret") if not isinstance(salt, bytes): raise ExpectedTypeError(salt, "bytes", "salt") # validate rounds if not isinstance(rounds, int_types): raise ExpectedTypeError(rounds, "int", "rounds") if rounds < 1: raise ValueError("rounds must be at least 1") # validate keylen if keylen is not None: if not isinstance(keylen, int_types): raise ExpectedTypeError(keylen, "int or None", "keylen") elif keylen < 0: raise ValueError("keylen must be at least 0") # special case for m2crypto + hmac-sha1 if prf == "hmac-sha1" and _EVP: if keylen is None: keylen = 20 # NOTE: doing check here, because M2crypto won't take 'long' instances # (which this is when running under 32bit) if keylen > MAX_HMAC_SHA1_KEYLEN: raise ValueError("key length too long for digest") # NOTE: as of 2012-4-4, m2crypto has buffer overflow issue # which may cause segfaults if keylen > 32 (EVP_MAX_KEY_LENGTH). # therefore we're avoiding m2crypto for large keys until that's fixed. 
# see https://bugzilla.osafoundation.org/show_bug.cgi?id=13052 if keylen < 32: return _EVP.pbkdf2(secret, salt, rounds, keylen) # resolve prf prf_func, digest_size = get_prf(prf) if keylen is None: keylen = digest_size # figure out how many blocks we'll need block_count = (keylen+digest_size-1)//digest_size if block_count >= MAX_BLOCKS: raise ValueError("key length too long for digest") # build up result from blocks def gen(): for i in irange(block_count): digest = prf_func(secret, salt + pack(">L", i+1)) accum = bytes_to_int(digest) for _ in irange(rounds-1): digest = prf_func(secret, digest) accum ^= bytes_to_int(digest) yield int_to_bytes(accum, digest_size) return join_bytes(gen())[:keylen] #============================================================================= # eof #=============================================================================
tedi3231/openerp
refs/heads/master
build/lib/openerp/addons/stock_no_autopicking/__init__.py
69
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock_no_autopicking # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
alextruberg/custom_django
refs/heads/master
tests/staticfiles_tests/urls/default.py
176
from django.conf.urls import patterns, url urlpatterns = patterns('', url(r'^static/(?P<path>.*)$', 'django.contrib.staticfiles.views.serve'), )
asi1024/competitive-library
refs/heads/master
cpp/docs/title.py
2
#!/usr/bin/env python import collections import json import os max_src_len = 3 def category(path, name, verifier): def ext(fname): return fname.split('.')[-1] def extract_ext(fname): return '.'.join(fname.split('.')[:-1]) def get_relpath(path, start): return os.path.normpath(os.path.relpath(path, start)) def sort_rank(src_name): if os.path.dirname(src_name).find('tests') != -1: return 0 elif ext(src_name) == 'cpp': return 1 else: return 2 def sort_src(src_list): src_with_rank = [(sort_rank(_), _) for _ in src_list] src_with_rank.sort() return [name for _, name in src_with_rank] try: files = [f.strip() for f in os.listdir(path)] if not files: raise os.FileNotFoundError except os.FileNotFoundError: return files_ext = [(0 if ext(f) == 'hpp' else 1, f) for f in files if ext(f) in ('hpp', 'cpp')] files_ext.sort() print('## ' + name) print('') print('| Algorithm | Verified | AOJ Problems |') print('|:---------:|:--------:|:-------------|') for _, fname in files_ext: algorithm = '[{}](./{}/{})'.format( fname, get_relpath(path, 'cpp'), extract_ext(fname)) fpath = path + '/' + fname if fpath in verifier: validated = '<font color="ForestGreen">Yes</font>' src_list = ['[{}](./{})'.format( os.path.basename(src_path), extract_ext(get_relpath(src_path, 'cpp'))) for src_path in sort_src(verifier[fpath])] if len(src_list) > max_src_len: src_str = '<br>'.join(src_list[:max_src_len]) + ' etc...' 
else: src_str = '<br>'.join(src_list) else: validated = '<font color="Red">No</font>' src_str = '' print('| {} | {} | {} |'.format(algorithm, validated, src_str)) print('') def get_verifier_dict(): memo_set = set() res = {} def page(path): if path in memo_set: return memo_set.add(path) for s in open(path): s = s.strip() if s.startswith('#include') and s.find('"') != -1: relpath = s.split('"')[1] key = os.path.normpath(os.path.dirname(path) + '/' + relpath) if key not in res: res[key] = [] res[key].append(path) page(key) def directory(path): for fname in os.listdir(path): if os.path.isdir(path + '/' + fname): directory(path + '/' + fname) elif fname.endswith('.cpp') or fname.endswith('.hpp'): page(path + '/' + fname) directory('cpp/tests') return res if __name__ == '__main__': f = open('cpp/include/TITLE.json', 'r') print('---') print('title: C++') print('---') decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict) json = decoder.decode(''.join(f.readlines())) verifier_dict = get_verifier_dict() f.close() for key, value in json.items(): category('cpp/include/{}'.format(key), value, verifier_dict)
tfroehlich82/erpnext
refs/heads/develop
erpnext/non_profit/doctype/chapter/chapter.py
3
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.website.website_generator import WebsiteGenerator class Chapter(WebsiteGenerator): _website = frappe._dict( condition_field = "published", ) def get_context(self, context): context.no_cache = True context.show_sidebar = True context.parents = [dict(label='View All Chapters', route='chapters', title='View Chapters')] def validate(self): if not self.route: #pylint: disable=E0203 self.route = 'chapters/' + self.scrub(self.name) def enable(self): chapter = frappe.get_doc('Chapter', frappe.form_dict.name) chapter.append('members', dict(enable=self.value)) chapter.save(ignore_permissions=1) frappe.db.commit() def get_list_context(context): context.allow_guest = True context.no_cache = True context.show_sidebar = True context.title = 'All Chapters' context.no_breadcrumbs = True context.order_by = 'creation desc' context.introduction = '<p>All Chapters</p>'
stickyd/libgdx
refs/heads/master
extensions/gdx-freetype/jni/freetype-2.5.5/src/tools/glnames.py
360
#!/usr/bin/env python # # # FreeType 2 glyph name builder # # Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by # David Turner, Robert Wilhelm, and Werner Lemberg. # # This file is part of the FreeType project, and may only be used, modified, # and distributed under the terms of the FreeType project license, # LICENSE.TXT. By continuing to use, modify, or distribute this file you # indicate that you have read the license and understand and accept it # fully. """\ usage: %s <output-file> This python script generates the glyph names tables defined in the `psnames' module. Its single argument is the name of the header file to be created. """ import sys, string, struct, re, os.path # This table lists the glyphs according to the Macintosh specification. # It is used by the TrueType Postscript names table. # # See # # http://fonts.apple.com/TTRefMan/RM06/Chap6post.html # # for the official list. # mac_standard_names = \ [ # 0 ".notdef", ".null", "nonmarkingreturn", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", # 10 "quotesingle", "parenleft", "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", # 20 "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "colon", # 30 "semicolon", "less", "equal", "greater", "question", "at", "A", "B", "C", "D", # 40 "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", # 50 "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", # 60 "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "grave", "a", "b", # 70 "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", # 80 "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", # 90 "w", "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde", "Adieresis", "Aring", # 100 "Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis", "aacute", "agrave", "acircumflex", "adieresis", "atilde", # 110 "aring", "ccedilla", "eacute", "egrave", "ecircumflex", "edieresis", "iacute", "igrave", "icircumflex", 
"idieresis", # 120 "ntilde", "oacute", "ograve", "ocircumflex", "odieresis", "otilde", "uacute", "ugrave", "ucircumflex", "udieresis", # 130 "dagger", "degree", "cent", "sterling", "section", "bullet", "paragraph", "germandbls", "registered", "copyright", # 140 "trademark", "acute", "dieresis", "notequal", "AE", "Oslash", "infinity", "plusminus", "lessequal", "greaterequal", # 150 "yen", "mu", "partialdiff", "summation", "product", "pi", "integral", "ordfeminine", "ordmasculine", "Omega", # 160 "ae", "oslash", "questiondown", "exclamdown", "logicalnot", "radical", "florin", "approxequal", "Delta", "guillemotleft", # 170 "guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde", "Otilde", "OE", "oe", "endash", "emdash", # 180 "quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide", "lozenge", "ydieresis", "Ydieresis", "fraction", "currency", # 190 "guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl", "periodcentered", "quotesinglbase", "quotedblbase", "perthousand", "Acircumflex", # 200 "Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex", # 210 "apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave", "dotlessi", "circumflex", "tilde", "macron", "breve", # 220 "dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "Lslash", "lslash", "Scaron", "scaron", # 230 "Zcaron", "zcaron", "brokenbar", "Eth", "eth", "Yacute", "yacute", "Thorn", "thorn", "minus", # 240 "multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf", "onequarter", "threequarters", "franc", "Gbreve", "gbreve", # 250 "Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute", "Ccaron", "ccaron", "dcroat" ] # The list of standard `SID' glyph names. For the official list, # see Annex A of document at # # http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf . 
# sid_standard_names = \ [ # 0 ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", "ampersand", "quoteright", "parenleft", # 10 "parenright", "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", # 20 "three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", # 30 "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", # 40 "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", # 50 "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", # 60 "bracketleft", "backslash", "bracketright", "asciicircum", "underscore", "quoteleft", "a", "b", "c", "d", # 70 "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", # 80 "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", # 90 "y", "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", "sterling", "fraction", # 100 "yen", "florin", "section", "currency", "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", # 110 "fl", "endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright", # 120 "guillemotright", "ellipsis", "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", "macron", "breve", # 130 "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine", # 140 "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", # 150 "onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter", "divide", # 160 "brokenbar", "degree", "thorn", "threequarters", "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", # 170 "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", # 180 "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis", # 190 "Ograve", "Otilde", 
"Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", # 200 "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis", # 210 "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", # 220 "otilde", "scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall", # 230 "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", "zerooldstyle", # 240 "oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "commasuperior", # 250 "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior", # 260 "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", "tsuperior", "ff", "ffi", "ffl", "parenleftinferior", # 270 "parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", # 280 "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", # 290 "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall", # 300 "colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", "Dieresissmall", # 310 "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", "questiondownsmall", # 320 "oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior", "foursuperior", "fivesuperior", "sixsuperior", # 330 "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", 
"oneinferior", "twoinferior", "threeinferior", "fourinferior", "fiveinferior", "sixinferior", # 340 "seveninferior", "eightinferior", "nineinferior", "centinferior", "dollarinferior", "periodinferior", "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", # 350 "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", # 360 "Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", "Odieresissmall", # 370 "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall", "001.000", # 380 "001.001", "001.002", "001.003", "Black", "Bold", "Book", "Light", "Medium", "Regular", "Roman", # 390 "Semibold" ] # This table maps character codes of the Adobe Standard Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_standard_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 0, 111, 112, 113, 114, 0, 115, 116, 117, 118, 119, 120, 121, 122, 0, 123, 0, 124, 125, 126, 127, 128, 129, 130, 131, 0, 132, 133, 0, 134, 135, 136, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, 139, 0, 0, 0, 0, 140, 141, 142, 143, 0, 0, 0, 0, 0, 144, 0, 0, 0, 145, 0, 0, 146, 147, 148, 149, 0, 0, 0, 0 ] # This table maps character codes of the Adobe Expert Type 1 # encoding to glyph indices in the sid_standard_names table. 
# t1_expert_encoding = \ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 229, 230, 0, 231, 232, 233, 234, 235, 236, 237, 238, 13, 14, 15, 99, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 27, 28, 249, 250, 251, 252, 0, 253, 254, 255, 256, 257, 0, 0, 0, 258, 0, 0, 259, 260, 261, 262, 0, 0, 263, 264, 265, 0, 266, 109, 110, 267, 268, 269, 0, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 304, 305, 306, 0, 0, 307, 308, 309, 310, 311, 0, 312, 0, 0, 313, 0, 0, 314, 315, 0, 0, 316, 317, 318, 0, 0, 0, 158, 155, 163, 319, 320, 321, 322, 323, 324, 325, 0, 0, 326, 150, 164, 169, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378 ] # This data has been taken literally from the files `glyphlist.txt' # and `zapfdingbats.txt' version 2.0, Sept 2002. 
It is available from # # http://sourceforge.net/adobe/aglfn/ # adobe_glyph_list = """\ A;0041 AE;00C6 AEacute;01FC AEmacron;01E2 AEsmall;F7E6 Aacute;00C1 Aacutesmall;F7E1 Abreve;0102 Abreveacute;1EAE Abrevecyrillic;04D0 Abrevedotbelow;1EB6 Abrevegrave;1EB0 Abrevehookabove;1EB2 Abrevetilde;1EB4 Acaron;01CD Acircle;24B6 Acircumflex;00C2 Acircumflexacute;1EA4 Acircumflexdotbelow;1EAC Acircumflexgrave;1EA6 Acircumflexhookabove;1EA8 Acircumflexsmall;F7E2 Acircumflextilde;1EAA Acute;F6C9 Acutesmall;F7B4 Acyrillic;0410 Adblgrave;0200 Adieresis;00C4 Adieresiscyrillic;04D2 Adieresismacron;01DE Adieresissmall;F7E4 Adotbelow;1EA0 Adotmacron;01E0 Agrave;00C0 Agravesmall;F7E0 Ahookabove;1EA2 Aiecyrillic;04D4 Ainvertedbreve;0202 Alpha;0391 Alphatonos;0386 Amacron;0100 Amonospace;FF21 Aogonek;0104 Aring;00C5 Aringacute;01FA Aringbelow;1E00 Aringsmall;F7E5 Asmall;F761 Atilde;00C3 Atildesmall;F7E3 Aybarmenian;0531 B;0042 Bcircle;24B7 Bdotaccent;1E02 Bdotbelow;1E04 Becyrillic;0411 Benarmenian;0532 Beta;0392 Bhook;0181 Blinebelow;1E06 Bmonospace;FF22 Brevesmall;F6F4 Bsmall;F762 Btopbar;0182 C;0043 Caarmenian;053E Cacute;0106 Caron;F6CA Caronsmall;F6F5 Ccaron;010C Ccedilla;00C7 Ccedillaacute;1E08 Ccedillasmall;F7E7 Ccircle;24B8 Ccircumflex;0108 Cdot;010A Cdotaccent;010A Cedillasmall;F7B8 Chaarmenian;0549 Cheabkhasiancyrillic;04BC Checyrillic;0427 Chedescenderabkhasiancyrillic;04BE Chedescendercyrillic;04B6 Chedieresiscyrillic;04F4 Cheharmenian;0543 Chekhakassiancyrillic;04CB Cheverticalstrokecyrillic;04B8 Chi;03A7 Chook;0187 Circumflexsmall;F6F6 Cmonospace;FF23 Coarmenian;0551 Csmall;F763 D;0044 DZ;01F1 DZcaron;01C4 Daarmenian;0534 Dafrican;0189 Dcaron;010E Dcedilla;1E10 Dcircle;24B9 Dcircumflexbelow;1E12 Dcroat;0110 Ddotaccent;1E0A Ddotbelow;1E0C Decyrillic;0414 Deicoptic;03EE Delta;2206 Deltagreek;0394 Dhook;018A Dieresis;F6CB DieresisAcute;F6CC DieresisGrave;F6CD Dieresissmall;F7A8 Digammagreek;03DC Djecyrillic;0402 Dlinebelow;1E0E Dmonospace;FF24 Dotaccentsmall;F6F7 Dslash;0110 
Dsmall;F764 Dtopbar;018B Dz;01F2 Dzcaron;01C5 Dzeabkhasiancyrillic;04E0 Dzecyrillic;0405 Dzhecyrillic;040F E;0045 Eacute;00C9 Eacutesmall;F7E9 Ebreve;0114 Ecaron;011A Ecedillabreve;1E1C Echarmenian;0535 Ecircle;24BA Ecircumflex;00CA Ecircumflexacute;1EBE Ecircumflexbelow;1E18 Ecircumflexdotbelow;1EC6 Ecircumflexgrave;1EC0 Ecircumflexhookabove;1EC2 Ecircumflexsmall;F7EA Ecircumflextilde;1EC4 Ecyrillic;0404 Edblgrave;0204 Edieresis;00CB Edieresissmall;F7EB Edot;0116 Edotaccent;0116 Edotbelow;1EB8 Efcyrillic;0424 Egrave;00C8 Egravesmall;F7E8 Eharmenian;0537 Ehookabove;1EBA Eightroman;2167 Einvertedbreve;0206 Eiotifiedcyrillic;0464 Elcyrillic;041B Elevenroman;216A Emacron;0112 Emacronacute;1E16 Emacrongrave;1E14 Emcyrillic;041C Emonospace;FF25 Encyrillic;041D Endescendercyrillic;04A2 Eng;014A Enghecyrillic;04A4 Enhookcyrillic;04C7 Eogonek;0118 Eopen;0190 Epsilon;0395 Epsilontonos;0388 Ercyrillic;0420 Ereversed;018E Ereversedcyrillic;042D Escyrillic;0421 Esdescendercyrillic;04AA Esh;01A9 Esmall;F765 Eta;0397 Etarmenian;0538 Etatonos;0389 Eth;00D0 Ethsmall;F7F0 Etilde;1EBC Etildebelow;1E1A Euro;20AC Ezh;01B7 Ezhcaron;01EE Ezhreversed;01B8 F;0046 Fcircle;24BB Fdotaccent;1E1E Feharmenian;0556 Feicoptic;03E4 Fhook;0191 Fitacyrillic;0472 Fiveroman;2164 Fmonospace;FF26 Fourroman;2163 Fsmall;F766 G;0047 GBsquare;3387 Gacute;01F4 Gamma;0393 Gammaafrican;0194 Gangiacoptic;03EA Gbreve;011E Gcaron;01E6 Gcedilla;0122 Gcircle;24BC Gcircumflex;011C Gcommaaccent;0122 Gdot;0120 Gdotaccent;0120 Gecyrillic;0413 Ghadarmenian;0542 Ghemiddlehookcyrillic;0494 Ghestrokecyrillic;0492 Gheupturncyrillic;0490 Ghook;0193 Gimarmenian;0533 Gjecyrillic;0403 Gmacron;1E20 Gmonospace;FF27 Grave;F6CE Gravesmall;F760 Gsmall;F767 Gsmallhook;029B Gstroke;01E4 H;0048 H18533;25CF H18543;25AA H18551;25AB H22073;25A1 HPsquare;33CB Haabkhasiancyrillic;04A8 Hadescendercyrillic;04B2 Hardsigncyrillic;042A Hbar;0126 Hbrevebelow;1E2A Hcedilla;1E28 Hcircle;24BD Hcircumflex;0124 Hdieresis;1E26 Hdotaccent;1E22 
Hdotbelow;1E24 Hmonospace;FF28 Hoarmenian;0540 Horicoptic;03E8 Hsmall;F768 Hungarumlaut;F6CF Hungarumlautsmall;F6F8 Hzsquare;3390 I;0049 IAcyrillic;042F IJ;0132 IUcyrillic;042E Iacute;00CD Iacutesmall;F7ED Ibreve;012C Icaron;01CF Icircle;24BE Icircumflex;00CE Icircumflexsmall;F7EE Icyrillic;0406 Idblgrave;0208 Idieresis;00CF Idieresisacute;1E2E Idieresiscyrillic;04E4 Idieresissmall;F7EF Idot;0130 Idotaccent;0130 Idotbelow;1ECA Iebrevecyrillic;04D6 Iecyrillic;0415 Ifraktur;2111 Igrave;00CC Igravesmall;F7EC Ihookabove;1EC8 Iicyrillic;0418 Iinvertedbreve;020A Iishortcyrillic;0419 Imacron;012A Imacroncyrillic;04E2 Imonospace;FF29 Iniarmenian;053B Iocyrillic;0401 Iogonek;012E Iota;0399 Iotaafrican;0196 Iotadieresis;03AA Iotatonos;038A Ismall;F769 Istroke;0197 Itilde;0128 Itildebelow;1E2C Izhitsacyrillic;0474 Izhitsadblgravecyrillic;0476 J;004A Jaarmenian;0541 Jcircle;24BF Jcircumflex;0134 Jecyrillic;0408 Jheharmenian;054B Jmonospace;FF2A Jsmall;F76A K;004B KBsquare;3385 KKsquare;33CD Kabashkircyrillic;04A0 Kacute;1E30 Kacyrillic;041A Kadescendercyrillic;049A Kahookcyrillic;04C3 Kappa;039A Kastrokecyrillic;049E Kaverticalstrokecyrillic;049C Kcaron;01E8 Kcedilla;0136 Kcircle;24C0 Kcommaaccent;0136 Kdotbelow;1E32 Keharmenian;0554 Kenarmenian;053F Khacyrillic;0425 Kheicoptic;03E6 Khook;0198 Kjecyrillic;040C Klinebelow;1E34 Kmonospace;FF2B Koppacyrillic;0480 Koppagreek;03DE Ksicyrillic;046E Ksmall;F76B L;004C LJ;01C7 LL;F6BF Lacute;0139 Lambda;039B Lcaron;013D Lcedilla;013B Lcircle;24C1 Lcircumflexbelow;1E3C Lcommaaccent;013B Ldot;013F Ldotaccent;013F Ldotbelow;1E36 Ldotbelowmacron;1E38 Liwnarmenian;053C Lj;01C8 Ljecyrillic;0409 Llinebelow;1E3A Lmonospace;FF2C Lslash;0141 Lslashsmall;F6F9 Lsmall;F76C M;004D MBsquare;3386 Macron;F6D0 Macronsmall;F7AF Macute;1E3E Mcircle;24C2 Mdotaccent;1E40 Mdotbelow;1E42 Menarmenian;0544 Mmonospace;FF2D Msmall;F76D Mturned;019C Mu;039C N;004E NJ;01CA Nacute;0143 Ncaron;0147 Ncedilla;0145 Ncircle;24C3 Ncircumflexbelow;1E4A Ncommaaccent;0145 
Ndotaccent;1E44 Ndotbelow;1E46 Nhookleft;019D Nineroman;2168 Nj;01CB Njecyrillic;040A Nlinebelow;1E48 Nmonospace;FF2E Nowarmenian;0546 Nsmall;F76E Ntilde;00D1 Ntildesmall;F7F1 Nu;039D O;004F OE;0152 OEsmall;F6FA Oacute;00D3 Oacutesmall;F7F3 Obarredcyrillic;04E8 Obarreddieresiscyrillic;04EA Obreve;014E Ocaron;01D1 Ocenteredtilde;019F Ocircle;24C4 Ocircumflex;00D4 Ocircumflexacute;1ED0 Ocircumflexdotbelow;1ED8 Ocircumflexgrave;1ED2 Ocircumflexhookabove;1ED4 Ocircumflexsmall;F7F4 Ocircumflextilde;1ED6 Ocyrillic;041E Odblacute;0150 Odblgrave;020C Odieresis;00D6 Odieresiscyrillic;04E6 Odieresissmall;F7F6 Odotbelow;1ECC Ogoneksmall;F6FB Ograve;00D2 Ogravesmall;F7F2 Oharmenian;0555 Ohm;2126 Ohookabove;1ECE Ohorn;01A0 Ohornacute;1EDA Ohorndotbelow;1EE2 Ohorngrave;1EDC Ohornhookabove;1EDE Ohorntilde;1EE0 Ohungarumlaut;0150 Oi;01A2 Oinvertedbreve;020E Omacron;014C Omacronacute;1E52 Omacrongrave;1E50 Omega;2126 Omegacyrillic;0460 Omegagreek;03A9 Omegaroundcyrillic;047A Omegatitlocyrillic;047C Omegatonos;038F Omicron;039F Omicrontonos;038C Omonospace;FF2F Oneroman;2160 Oogonek;01EA Oogonekmacron;01EC Oopen;0186 Oslash;00D8 Oslashacute;01FE Oslashsmall;F7F8 Osmall;F76F Ostrokeacute;01FE Otcyrillic;047E Otilde;00D5 Otildeacute;1E4C Otildedieresis;1E4E Otildesmall;F7F5 P;0050 Pacute;1E54 Pcircle;24C5 Pdotaccent;1E56 Pecyrillic;041F Peharmenian;054A Pemiddlehookcyrillic;04A6 Phi;03A6 Phook;01A4 Pi;03A0 Piwrarmenian;0553 Pmonospace;FF30 Psi;03A8 Psicyrillic;0470 Psmall;F770 Q;0051 Qcircle;24C6 Qmonospace;FF31 Qsmall;F771 R;0052 Raarmenian;054C Racute;0154 Rcaron;0158 Rcedilla;0156 Rcircle;24C7 Rcommaaccent;0156 Rdblgrave;0210 Rdotaccent;1E58 Rdotbelow;1E5A Rdotbelowmacron;1E5C Reharmenian;0550 Rfraktur;211C Rho;03A1 Ringsmall;F6FC Rinvertedbreve;0212 Rlinebelow;1E5E Rmonospace;FF32 Rsmall;F772 Rsmallinverted;0281 Rsmallinvertedsuperior;02B6 S;0053 SF010000;250C SF020000;2514 SF030000;2510 SF040000;2518 SF050000;253C SF060000;252C SF070000;2534 SF080000;251C SF090000;2524 
SF100000;2500 SF110000;2502 SF190000;2561 SF200000;2562 SF210000;2556 SF220000;2555 SF230000;2563 SF240000;2551 SF250000;2557 SF260000;255D SF270000;255C SF280000;255B SF360000;255E SF370000;255F SF380000;255A SF390000;2554 SF400000;2569 SF410000;2566 SF420000;2560 SF430000;2550 SF440000;256C SF450000;2567 SF460000;2568 SF470000;2564 SF480000;2565 SF490000;2559 SF500000;2558 SF510000;2552 SF520000;2553 SF530000;256B SF540000;256A Sacute;015A Sacutedotaccent;1E64 Sampigreek;03E0 Scaron;0160 Scarondotaccent;1E66 Scaronsmall;F6FD Scedilla;015E Schwa;018F Schwacyrillic;04D8 Schwadieresiscyrillic;04DA Scircle;24C8 Scircumflex;015C Scommaaccent;0218 Sdotaccent;1E60 Sdotbelow;1E62 Sdotbelowdotaccent;1E68 Seharmenian;054D Sevenroman;2166 Shaarmenian;0547 Shacyrillic;0428 Shchacyrillic;0429 Sheicoptic;03E2 Shhacyrillic;04BA Shimacoptic;03EC Sigma;03A3 Sixroman;2165 Smonospace;FF33 Softsigncyrillic;042C Ssmall;F773 Stigmagreek;03DA T;0054 Tau;03A4 Tbar;0166 Tcaron;0164 Tcedilla;0162 Tcircle;24C9 Tcircumflexbelow;1E70 Tcommaaccent;0162 Tdotaccent;1E6A Tdotbelow;1E6C Tecyrillic;0422 Tedescendercyrillic;04AC Tenroman;2169 Tetsecyrillic;04B4 Theta;0398 Thook;01AC Thorn;00DE Thornsmall;F7FE Threeroman;2162 Tildesmall;F6FE Tiwnarmenian;054F Tlinebelow;1E6E Tmonospace;FF34 Toarmenian;0539 Tonefive;01BC Tonesix;0184 Tonetwo;01A7 Tretroflexhook;01AE Tsecyrillic;0426 Tshecyrillic;040B Tsmall;F774 Twelveroman;216B Tworoman;2161 U;0055 Uacute;00DA Uacutesmall;F7FA Ubreve;016C Ucaron;01D3 Ucircle;24CA Ucircumflex;00DB Ucircumflexbelow;1E76 Ucircumflexsmall;F7FB Ucyrillic;0423 Udblacute;0170 Udblgrave;0214 Udieresis;00DC Udieresisacute;01D7 Udieresisbelow;1E72 Udieresiscaron;01D9 Udieresiscyrillic;04F0 Udieresisgrave;01DB Udieresismacron;01D5 Udieresissmall;F7FC Udotbelow;1EE4 Ugrave;00D9 Ugravesmall;F7F9 Uhookabove;1EE6 Uhorn;01AF Uhornacute;1EE8 Uhorndotbelow;1EF0 Uhorngrave;1EEA Uhornhookabove;1EEC Uhorntilde;1EEE Uhungarumlaut;0170 Uhungarumlautcyrillic;04F2 Uinvertedbreve;0216 
Ukcyrillic;0478 Umacron;016A Umacroncyrillic;04EE Umacrondieresis;1E7A Umonospace;FF35 Uogonek;0172 Upsilon;03A5 Upsilon1;03D2 Upsilonacutehooksymbolgreek;03D3 Upsilonafrican;01B1 Upsilondieresis;03AB Upsilondieresishooksymbolgreek;03D4 Upsilonhooksymbol;03D2 Upsilontonos;038E Uring;016E Ushortcyrillic;040E Usmall;F775 Ustraightcyrillic;04AE Ustraightstrokecyrillic;04B0 Utilde;0168 Utildeacute;1E78 Utildebelow;1E74 V;0056 Vcircle;24CB Vdotbelow;1E7E Vecyrillic;0412 Vewarmenian;054E Vhook;01B2 Vmonospace;FF36 Voarmenian;0548 Vsmall;F776 Vtilde;1E7C W;0057 Wacute;1E82 Wcircle;24CC Wcircumflex;0174 Wdieresis;1E84 Wdotaccent;1E86 Wdotbelow;1E88 Wgrave;1E80 Wmonospace;FF37 Wsmall;F777 X;0058 Xcircle;24CD Xdieresis;1E8C Xdotaccent;1E8A Xeharmenian;053D Xi;039E Xmonospace;FF38 Xsmall;F778 Y;0059 Yacute;00DD Yacutesmall;F7FD Yatcyrillic;0462 Ycircle;24CE Ycircumflex;0176 Ydieresis;0178 Ydieresissmall;F7FF Ydotaccent;1E8E Ydotbelow;1EF4 Yericyrillic;042B Yerudieresiscyrillic;04F8 Ygrave;1EF2 Yhook;01B3 Yhookabove;1EF6 Yiarmenian;0545 Yicyrillic;0407 Yiwnarmenian;0552 Ymonospace;FF39 Ysmall;F779 Ytilde;1EF8 Yusbigcyrillic;046A Yusbigiotifiedcyrillic;046C Yuslittlecyrillic;0466 Yuslittleiotifiedcyrillic;0468 Z;005A Zaarmenian;0536 Zacute;0179 Zcaron;017D Zcaronsmall;F6FF Zcircle;24CF Zcircumflex;1E90 Zdot;017B Zdotaccent;017B Zdotbelow;1E92 Zecyrillic;0417 Zedescendercyrillic;0498 Zedieresiscyrillic;04DE Zeta;0396 Zhearmenian;053A Zhebrevecyrillic;04C1 Zhecyrillic;0416 Zhedescendercyrillic;0496 Zhedieresiscyrillic;04DC Zlinebelow;1E94 Zmonospace;FF3A Zsmall;F77A Zstroke;01B5 a;0061 aabengali;0986 aacute;00E1 aadeva;0906 aagujarati;0A86 aagurmukhi;0A06 aamatragurmukhi;0A3E aarusquare;3303 aavowelsignbengali;09BE aavowelsigndeva;093E aavowelsigngujarati;0ABE abbreviationmarkarmenian;055F abbreviationsigndeva;0970 abengali;0985 abopomofo;311A abreve;0103 abreveacute;1EAF abrevecyrillic;04D1 abrevedotbelow;1EB7 abrevegrave;1EB1 abrevehookabove;1EB3 abrevetilde;1EB5 acaron;01CE 
acircle;24D0 acircumflex;00E2 acircumflexacute;1EA5 acircumflexdotbelow;1EAD acircumflexgrave;1EA7 acircumflexhookabove;1EA9 acircumflextilde;1EAB acute;00B4 acutebelowcmb;0317 acutecmb;0301 acutecomb;0301 acutedeva;0954 acutelowmod;02CF acutetonecmb;0341 acyrillic;0430 adblgrave;0201 addakgurmukhi;0A71 adeva;0905 adieresis;00E4 adieresiscyrillic;04D3 adieresismacron;01DF adotbelow;1EA1 adotmacron;01E1 ae;00E6 aeacute;01FD aekorean;3150 aemacron;01E3 afii00208;2015 afii08941;20A4 afii10017;0410 afii10018;0411 afii10019;0412 afii10020;0413 afii10021;0414 afii10022;0415 afii10023;0401 afii10024;0416 afii10025;0417 afii10026;0418 afii10027;0419 afii10028;041A afii10029;041B afii10030;041C afii10031;041D afii10032;041E afii10033;041F afii10034;0420 afii10035;0421 afii10036;0422 afii10037;0423 afii10038;0424 afii10039;0425 afii10040;0426 afii10041;0427 afii10042;0428 afii10043;0429 afii10044;042A afii10045;042B afii10046;042C afii10047;042D afii10048;042E afii10049;042F afii10050;0490 afii10051;0402 afii10052;0403 afii10053;0404 afii10054;0405 afii10055;0406 afii10056;0407 afii10057;0408 afii10058;0409 afii10059;040A afii10060;040B afii10061;040C afii10062;040E afii10063;F6C4 afii10064;F6C5 afii10065;0430 afii10066;0431 afii10067;0432 afii10068;0433 afii10069;0434 afii10070;0435 afii10071;0451 afii10072;0436 afii10073;0437 afii10074;0438 afii10075;0439 afii10076;043A afii10077;043B afii10078;043C afii10079;043D afii10080;043E afii10081;043F afii10082;0440 afii10083;0441 afii10084;0442 afii10085;0443 afii10086;0444 afii10087;0445 afii10088;0446 afii10089;0447 afii10090;0448 afii10091;0449 afii10092;044A afii10093;044B afii10094;044C afii10095;044D afii10096;044E afii10097;044F afii10098;0491 afii10099;0452 afii10100;0453 afii10101;0454 afii10102;0455 afii10103;0456 afii10104;0457 afii10105;0458 afii10106;0459 afii10107;045A afii10108;045B afii10109;045C afii10110;045E afii10145;040F afii10146;0462 afii10147;0472 afii10148;0474 afii10192;F6C6 afii10193;045F afii10194;0463 
afii10195;0473 afii10196;0475 afii10831;F6C7 afii10832;F6C8 afii10846;04D9 afii299;200E afii300;200F afii301;200D afii57381;066A afii57388;060C afii57392;0660 afii57393;0661 afii57394;0662 afii57395;0663 afii57396;0664 afii57397;0665 afii57398;0666 afii57399;0667 afii57400;0668 afii57401;0669 afii57403;061B afii57407;061F afii57409;0621 afii57410;0622 afii57411;0623 afii57412;0624 afii57413;0625 afii57414;0626 afii57415;0627 afii57416;0628 afii57417;0629 afii57418;062A afii57419;062B afii57420;062C afii57421;062D afii57422;062E afii57423;062F afii57424;0630 afii57425;0631 afii57426;0632 afii57427;0633 afii57428;0634 afii57429;0635 afii57430;0636 afii57431;0637 afii57432;0638 afii57433;0639 afii57434;063A afii57440;0640 afii57441;0641 afii57442;0642 afii57443;0643 afii57444;0644 afii57445;0645 afii57446;0646 afii57448;0648 afii57449;0649 afii57450;064A afii57451;064B afii57452;064C afii57453;064D afii57454;064E afii57455;064F afii57456;0650 afii57457;0651 afii57458;0652 afii57470;0647 afii57505;06A4 afii57506;067E afii57507;0686 afii57508;0698 afii57509;06AF afii57511;0679 afii57512;0688 afii57513;0691 afii57514;06BA afii57519;06D2 afii57534;06D5 afii57636;20AA afii57645;05BE afii57658;05C3 afii57664;05D0 afii57665;05D1 afii57666;05D2 afii57667;05D3 afii57668;05D4 afii57669;05D5 afii57670;05D6 afii57671;05D7 afii57672;05D8 afii57673;05D9 afii57674;05DA afii57675;05DB afii57676;05DC afii57677;05DD afii57678;05DE afii57679;05DF afii57680;05E0 afii57681;05E1 afii57682;05E2 afii57683;05E3 afii57684;05E4 afii57685;05E5 afii57686;05E6 afii57687;05E7 afii57688;05E8 afii57689;05E9 afii57690;05EA afii57694;FB2A afii57695;FB2B afii57700;FB4B afii57705;FB1F afii57716;05F0 afii57717;05F1 afii57718;05F2 afii57723;FB35 afii57793;05B4 afii57794;05B5 afii57795;05B6 afii57796;05BB afii57797;05B8 afii57798;05B7 afii57799;05B0 afii57800;05B2 afii57801;05B1 afii57802;05B3 afii57803;05C2 afii57804;05C1 afii57806;05B9 afii57807;05BC afii57839;05BD afii57841;05BF afii57842;05C0 
afii57929;02BC afii61248;2105 afii61289;2113 afii61352;2116 afii61573;202C afii61574;202D afii61575;202E afii61664;200C afii63167;066D afii64937;02BD agrave;00E0 agujarati;0A85 agurmukhi;0A05 ahiragana;3042 ahookabove;1EA3 aibengali;0990 aibopomofo;311E aideva;0910 aiecyrillic;04D5 aigujarati;0A90 aigurmukhi;0A10 aimatragurmukhi;0A48 ainarabic;0639 ainfinalarabic;FECA aininitialarabic;FECB ainmedialarabic;FECC ainvertedbreve;0203 aivowelsignbengali;09C8 aivowelsigndeva;0948 aivowelsigngujarati;0AC8 akatakana;30A2 akatakanahalfwidth;FF71 akorean;314F alef;05D0 alefarabic;0627 alefdageshhebrew;FB30 aleffinalarabic;FE8E alefhamzaabovearabic;0623 alefhamzaabovefinalarabic;FE84 alefhamzabelowarabic;0625 alefhamzabelowfinalarabic;FE88 alefhebrew;05D0 aleflamedhebrew;FB4F alefmaddaabovearabic;0622 alefmaddaabovefinalarabic;FE82 alefmaksuraarabic;0649 alefmaksurafinalarabic;FEF0 alefmaksurainitialarabic;FEF3 alefmaksuramedialarabic;FEF4 alefpatahhebrew;FB2E alefqamatshebrew;FB2F aleph;2135 allequal;224C alpha;03B1 alphatonos;03AC amacron;0101 amonospace;FF41 ampersand;0026 ampersandmonospace;FF06 ampersandsmall;F726 amsquare;33C2 anbopomofo;3122 angbopomofo;3124 angkhankhuthai;0E5A angle;2220 anglebracketleft;3008 anglebracketleftvertical;FE3F anglebracketright;3009 anglebracketrightvertical;FE40 angleleft;2329 angleright;232A angstrom;212B anoteleia;0387 anudattadeva;0952 anusvarabengali;0982 anusvaradeva;0902 anusvaragujarati;0A82 aogonek;0105 apaatosquare;3300 aparen;249C apostrophearmenian;055A apostrophemod;02BC apple;F8FF approaches;2250 approxequal;2248 approxequalorimage;2252 approximatelyequal;2245 araeaekorean;318E araeakorean;318D arc;2312 arighthalfring;1E9A aring;00E5 aringacute;01FB aringbelow;1E01 arrowboth;2194 arrowdashdown;21E3 arrowdashleft;21E0 arrowdashright;21E2 arrowdashup;21E1 arrowdblboth;21D4 arrowdbldown;21D3 arrowdblleft;21D0 arrowdblright;21D2 arrowdblup;21D1 arrowdown;2193 arrowdownleft;2199 arrowdownright;2198 arrowdownwhite;21E9 
arrowheaddownmod;02C5 arrowheadleftmod;02C2 arrowheadrightmod;02C3 arrowheadupmod;02C4 arrowhorizex;F8E7 arrowleft;2190 arrowleftdbl;21D0 arrowleftdblstroke;21CD arrowleftoverright;21C6 arrowleftwhite;21E6 arrowright;2192 arrowrightdblstroke;21CF arrowrightheavy;279E arrowrightoverleft;21C4 arrowrightwhite;21E8 arrowtableft;21E4 arrowtabright;21E5 arrowup;2191 arrowupdn;2195 arrowupdnbse;21A8 arrowupdownbase;21A8 arrowupleft;2196 arrowupleftofdown;21C5 arrowupright;2197 arrowupwhite;21E7 arrowvertex;F8E6 asciicircum;005E asciicircummonospace;FF3E asciitilde;007E asciitildemonospace;FF5E ascript;0251 ascriptturned;0252 asmallhiragana;3041 asmallkatakana;30A1 asmallkatakanahalfwidth;FF67 asterisk;002A asteriskaltonearabic;066D asteriskarabic;066D asteriskmath;2217 asteriskmonospace;FF0A asterisksmall;FE61 asterism;2042 asuperior;F6E9 asymptoticallyequal;2243 at;0040 atilde;00E3 atmonospace;FF20 atsmall;FE6B aturned;0250 aubengali;0994 aubopomofo;3120 audeva;0914 augujarati;0A94 augurmukhi;0A14 aulengthmarkbengali;09D7 aumatragurmukhi;0A4C auvowelsignbengali;09CC auvowelsigndeva;094C auvowelsigngujarati;0ACC avagrahadeva;093D aybarmenian;0561 ayin;05E2 ayinaltonehebrew;FB20 ayinhebrew;05E2 b;0062 babengali;09AC backslash;005C backslashmonospace;FF3C badeva;092C bagujarati;0AAC bagurmukhi;0A2C bahiragana;3070 bahtthai;0E3F bakatakana;30D0 bar;007C barmonospace;FF5C bbopomofo;3105 bcircle;24D1 bdotaccent;1E03 bdotbelow;1E05 beamedsixteenthnotes;266C because;2235 becyrillic;0431 beharabic;0628 behfinalarabic;FE90 behinitialarabic;FE91 behiragana;3079 behmedialarabic;FE92 behmeeminitialarabic;FC9F behmeemisolatedarabic;FC08 behnoonfinalarabic;FC6D bekatakana;30D9 benarmenian;0562 bet;05D1 beta;03B2 betasymbolgreek;03D0 betdagesh;FB31 betdageshhebrew;FB31 bethebrew;05D1 betrafehebrew;FB4C bhabengali;09AD bhadeva;092D bhagujarati;0AAD bhagurmukhi;0A2D bhook;0253 bihiragana;3073 bikatakana;30D3 bilabialclick;0298 bindigurmukhi;0A02 birusquare;3331 blackcircle;25CF 
blackdiamond;25C6 blackdownpointingtriangle;25BC blackleftpointingpointer;25C4 blackleftpointingtriangle;25C0 blacklenticularbracketleft;3010 blacklenticularbracketleftvertical;FE3B blacklenticularbracketright;3011 blacklenticularbracketrightvertical;FE3C blacklowerlefttriangle;25E3 blacklowerrighttriangle;25E2 blackrectangle;25AC blackrightpointingpointer;25BA blackrightpointingtriangle;25B6 blacksmallsquare;25AA blacksmilingface;263B blacksquare;25A0 blackstar;2605 blackupperlefttriangle;25E4 blackupperrighttriangle;25E5 blackuppointingsmalltriangle;25B4 blackuppointingtriangle;25B2 blank;2423 blinebelow;1E07 block;2588 bmonospace;FF42 bobaimaithai;0E1A bohiragana;307C bokatakana;30DC bparen;249D bqsquare;33C3 braceex;F8F4 braceleft;007B braceleftbt;F8F3 braceleftmid;F8F2 braceleftmonospace;FF5B braceleftsmall;FE5B bracelefttp;F8F1 braceleftvertical;FE37 braceright;007D bracerightbt;F8FE bracerightmid;F8FD bracerightmonospace;FF5D bracerightsmall;FE5C bracerighttp;F8FC bracerightvertical;FE38 bracketleft;005B bracketleftbt;F8F0 bracketleftex;F8EF bracketleftmonospace;FF3B bracketlefttp;F8EE bracketright;005D bracketrightbt;F8FB bracketrightex;F8FA bracketrightmonospace;FF3D bracketrighttp;F8F9 breve;02D8 brevebelowcmb;032E brevecmb;0306 breveinvertedbelowcmb;032F breveinvertedcmb;0311 breveinverteddoublecmb;0361 bridgebelowcmb;032A bridgeinvertedbelowcmb;033A brokenbar;00A6 bstroke;0180 bsuperior;F6EA btopbar;0183 buhiragana;3076 bukatakana;30D6 bullet;2022 bulletinverse;25D8 bulletoperator;2219 bullseye;25CE c;0063 caarmenian;056E cabengali;099A cacute;0107 cadeva;091A cagujarati;0A9A cagurmukhi;0A1A calsquare;3388 candrabindubengali;0981 candrabinducmb;0310 candrabindudeva;0901 candrabindugujarati;0A81 capslock;21EA careof;2105 caron;02C7 caronbelowcmb;032C caroncmb;030C carriagereturn;21B5 cbopomofo;3118 ccaron;010D ccedilla;00E7 ccedillaacute;1E09 ccircle;24D2 ccircumflex;0109 ccurl;0255 cdot;010B cdotaccent;010B cdsquare;33C5 cedilla;00B8 cedillacmb;0327 
cent;00A2 centigrade;2103 centinferior;F6DF centmonospace;FFE0 centoldstyle;F7A2 centsuperior;F6E0 chaarmenian;0579 chabengali;099B chadeva;091B chagujarati;0A9B chagurmukhi;0A1B chbopomofo;3114 cheabkhasiancyrillic;04BD checkmark;2713 checyrillic;0447 chedescenderabkhasiancyrillic;04BF chedescendercyrillic;04B7 chedieresiscyrillic;04F5 cheharmenian;0573 chekhakassiancyrillic;04CC cheverticalstrokecyrillic;04B9 chi;03C7 chieuchacirclekorean;3277 chieuchaparenkorean;3217 chieuchcirclekorean;3269 chieuchkorean;314A chieuchparenkorean;3209 chochangthai;0E0A chochanthai;0E08 chochingthai;0E09 chochoethai;0E0C chook;0188 cieucacirclekorean;3276 cieucaparenkorean;3216 cieuccirclekorean;3268 cieuckorean;3148 cieucparenkorean;3208 cieucuparenkorean;321C circle;25CB circlemultiply;2297 circleot;2299 circleplus;2295 circlepostalmark;3036 circlewithlefthalfblack;25D0 circlewithrighthalfblack;25D1 circumflex;02C6 circumflexbelowcmb;032D circumflexcmb;0302 clear;2327 clickalveolar;01C2 clickdental;01C0 clicklateral;01C1 clickretroflex;01C3 club;2663 clubsuitblack;2663 clubsuitwhite;2667 cmcubedsquare;33A4 cmonospace;FF43 cmsquaredsquare;33A0 coarmenian;0581 colon;003A colonmonetary;20A1 colonmonospace;FF1A colonsign;20A1 colonsmall;FE55 colontriangularhalfmod;02D1 colontriangularmod;02D0 comma;002C commaabovecmb;0313 commaaboverightcmb;0315 commaaccent;F6C3 commaarabic;060C commaarmenian;055D commainferior;F6E1 commamonospace;FF0C commareversedabovecmb;0314 commareversedmod;02BD commasmall;FE50 commasuperior;F6E2 commaturnedabovecmb;0312 commaturnedmod;02BB compass;263C congruent;2245 contourintegral;222E control;2303 controlACK;0006 controlBEL;0007 controlBS;0008 controlCAN;0018 controlCR;000D controlDC1;0011 controlDC2;0012 controlDC3;0013 controlDC4;0014 controlDEL;007F controlDLE;0010 controlEM;0019 controlENQ;0005 controlEOT;0004 controlESC;001B controlETB;0017 controlETX;0003 controlFF;000C controlFS;001C controlGS;001D controlHT;0009 controlLF;000A controlNAK;0015 
controlRS;001E controlSI;000F controlSO;000E controlSOT;0002 controlSTX;0001 controlSUB;001A controlSYN;0016 controlUS;001F controlVT;000B copyright;00A9 copyrightsans;F8E9 copyrightserif;F6D9 cornerbracketleft;300C cornerbracketlefthalfwidth;FF62 cornerbracketleftvertical;FE41 cornerbracketright;300D cornerbracketrighthalfwidth;FF63 cornerbracketrightvertical;FE42 corporationsquare;337F cosquare;33C7 coverkgsquare;33C6 cparen;249E cruzeiro;20A2 cstretched;0297 curlyand;22CF curlyor;22CE currency;00A4 cyrBreve;F6D1 cyrFlex;F6D2 cyrbreve;F6D4 cyrflex;F6D5 d;0064 daarmenian;0564 dabengali;09A6 dadarabic;0636 dadeva;0926 dadfinalarabic;FEBE dadinitialarabic;FEBF dadmedialarabic;FEC0 dagesh;05BC dageshhebrew;05BC dagger;2020 daggerdbl;2021 dagujarati;0AA6 dagurmukhi;0A26 dahiragana;3060 dakatakana;30C0 dalarabic;062F dalet;05D3 daletdagesh;FB33 daletdageshhebrew;FB33 dalethatafpatah;05D3 05B2 dalethatafpatahhebrew;05D3 05B2 dalethatafsegol;05D3 05B1 dalethatafsegolhebrew;05D3 05B1 dalethebrew;05D3 dalethiriq;05D3 05B4 dalethiriqhebrew;05D3 05B4 daletholam;05D3 05B9 daletholamhebrew;05D3 05B9 daletpatah;05D3 05B7 daletpatahhebrew;05D3 05B7 daletqamats;05D3 05B8 daletqamatshebrew;05D3 05B8 daletqubuts;05D3 05BB daletqubutshebrew;05D3 05BB daletsegol;05D3 05B6 daletsegolhebrew;05D3 05B6 daletsheva;05D3 05B0 daletshevahebrew;05D3 05B0 dalettsere;05D3 05B5 dalettserehebrew;05D3 05B5 dalfinalarabic;FEAA dammaarabic;064F dammalowarabic;064F dammatanaltonearabic;064C dammatanarabic;064C danda;0964 dargahebrew;05A7 dargalefthebrew;05A7 dasiapneumatacyrilliccmb;0485 dblGrave;F6D3 dblanglebracketleft;300A dblanglebracketleftvertical;FE3D dblanglebracketright;300B dblanglebracketrightvertical;FE3E dblarchinvertedbelowcmb;032B dblarrowleft;21D4 dblarrowright;21D2 dbldanda;0965 dblgrave;F6D6 dblgravecmb;030F dblintegral;222C dbllowline;2017 dbllowlinecmb;0333 dbloverlinecmb;033F dblprimemod;02BA dblverticalbar;2016 dblverticallineabovecmb;030E dbopomofo;3109 dbsquare;33C8 
dcaron;010F dcedilla;1E11 dcircle;24D3 dcircumflexbelow;1E13 dcroat;0111 ddabengali;09A1 ddadeva;0921 ddagujarati;0AA1 ddagurmukhi;0A21 ddalarabic;0688 ddalfinalarabic;FB89 dddhadeva;095C ddhabengali;09A2 ddhadeva;0922 ddhagujarati;0AA2 ddhagurmukhi;0A22 ddotaccent;1E0B ddotbelow;1E0D decimalseparatorarabic;066B decimalseparatorpersian;066B decyrillic;0434 degree;00B0 dehihebrew;05AD dehiragana;3067 deicoptic;03EF dekatakana;30C7 deleteleft;232B deleteright;2326 delta;03B4 deltaturned;018D denominatorminusonenumeratorbengali;09F8 dezh;02A4 dhabengali;09A7 dhadeva;0927 dhagujarati;0AA7 dhagurmukhi;0A27 dhook;0257 dialytikatonos;0385 dialytikatonoscmb;0344 diamond;2666 diamondsuitwhite;2662 dieresis;00A8 dieresisacute;F6D7 dieresisbelowcmb;0324 dieresiscmb;0308 dieresisgrave;F6D8 dieresistonos;0385 dihiragana;3062 dikatakana;30C2 dittomark;3003 divide;00F7 divides;2223 divisionslash;2215 djecyrillic;0452 dkshade;2593 dlinebelow;1E0F dlsquare;3397 dmacron;0111 dmonospace;FF44 dnblock;2584 dochadathai;0E0E dodekthai;0E14 dohiragana;3069 dokatakana;30C9 dollar;0024 dollarinferior;F6E3 dollarmonospace;FF04 dollaroldstyle;F724 dollarsmall;FE69 dollarsuperior;F6E4 dong;20AB dorusquare;3326 dotaccent;02D9 dotaccentcmb;0307 dotbelowcmb;0323 dotbelowcomb;0323 dotkatakana;30FB dotlessi;0131 dotlessj;F6BE dotlessjstrokehook;0284 dotmath;22C5 dottedcircle;25CC doubleyodpatah;FB1F doubleyodpatahhebrew;FB1F downtackbelowcmb;031E downtackmod;02D5 dparen;249F dsuperior;F6EB dtail;0256 dtopbar;018C duhiragana;3065 dukatakana;30C5 dz;01F3 dzaltone;02A3 dzcaron;01C6 dzcurl;02A5 dzeabkhasiancyrillic;04E1 dzecyrillic;0455 dzhecyrillic;045F e;0065 eacute;00E9 earth;2641 ebengali;098F ebopomofo;311C ebreve;0115 ecandradeva;090D ecandragujarati;0A8D ecandravowelsigndeva;0945 ecandravowelsigngujarati;0AC5 ecaron;011B ecedillabreve;1E1D echarmenian;0565 echyiwnarmenian;0587 ecircle;24D4 ecircumflex;00EA ecircumflexacute;1EBF ecircumflexbelow;1E19 ecircumflexdotbelow;1EC7 ecircumflexgrave;1EC1 
ecircumflexhookabove;1EC3 ecircumflextilde;1EC5 ecyrillic;0454 edblgrave;0205 edeva;090F edieresis;00EB edot;0117 edotaccent;0117 edotbelow;1EB9 eegurmukhi;0A0F eematragurmukhi;0A47 efcyrillic;0444 egrave;00E8 egujarati;0A8F eharmenian;0567 ehbopomofo;311D ehiragana;3048 ehookabove;1EBB eibopomofo;311F eight;0038 eightarabic;0668 eightbengali;09EE eightcircle;2467 eightcircleinversesansserif;2791 eightdeva;096E eighteencircle;2471 eighteenparen;2485 eighteenperiod;2499 eightgujarati;0AEE eightgurmukhi;0A6E eighthackarabic;0668 eighthangzhou;3028 eighthnotebeamed;266B eightideographicparen;3227 eightinferior;2088 eightmonospace;FF18 eightoldstyle;F738 eightparen;247B eightperiod;248F eightpersian;06F8 eightroman;2177 eightsuperior;2078 eightthai;0E58 einvertedbreve;0207 eiotifiedcyrillic;0465 ekatakana;30A8 ekatakanahalfwidth;FF74 ekonkargurmukhi;0A74 ekorean;3154 elcyrillic;043B element;2208 elevencircle;246A elevenparen;247E elevenperiod;2492 elevenroman;217A ellipsis;2026 ellipsisvertical;22EE emacron;0113 emacronacute;1E17 emacrongrave;1E15 emcyrillic;043C emdash;2014 emdashvertical;FE31 emonospace;FF45 emphasismarkarmenian;055B emptyset;2205 enbopomofo;3123 encyrillic;043D endash;2013 endashvertical;FE32 endescendercyrillic;04A3 eng;014B engbopomofo;3125 enghecyrillic;04A5 enhookcyrillic;04C8 enspace;2002 eogonek;0119 eokorean;3153 eopen;025B eopenclosed;029A eopenreversed;025C eopenreversedclosed;025E eopenreversedhook;025D eparen;24A0 epsilon;03B5 epsilontonos;03AD equal;003D equalmonospace;FF1D equalsmall;FE66 equalsuperior;207C equivalence;2261 erbopomofo;3126 ercyrillic;0440 ereversed;0258 ereversedcyrillic;044D escyrillic;0441 esdescendercyrillic;04AB esh;0283 eshcurl;0286 eshortdeva;090E eshortvowelsigndeva;0946 eshreversedloop;01AA eshsquatreversed;0285 esmallhiragana;3047 esmallkatakana;30A7 esmallkatakanahalfwidth;FF6A estimated;212E esuperior;F6EC eta;03B7 etarmenian;0568 etatonos;03AE eth;00F0 etilde;1EBD etildebelow;1E1B etnahtafoukhhebrew;0591 
etnahtafoukhlefthebrew;0591 etnahtahebrew;0591 etnahtalefthebrew;0591 eturned;01DD eukorean;3161 euro;20AC evowelsignbengali;09C7 evowelsigndeva;0947 evowelsigngujarati;0AC7 exclam;0021 exclamarmenian;055C exclamdbl;203C exclamdown;00A1 exclamdownsmall;F7A1 exclammonospace;FF01 exclamsmall;F721 existential;2203 ezh;0292 ezhcaron;01EF ezhcurl;0293 ezhreversed;01B9 ezhtail;01BA f;0066 fadeva;095E fagurmukhi;0A5E fahrenheit;2109 fathaarabic;064E fathalowarabic;064E fathatanarabic;064B fbopomofo;3108 fcircle;24D5 fdotaccent;1E1F feharabic;0641 feharmenian;0586 fehfinalarabic;FED2 fehinitialarabic;FED3 fehmedialarabic;FED4 feicoptic;03E5 female;2640 ff;FB00 ffi;FB03 ffl;FB04 fi;FB01 fifteencircle;246E fifteenparen;2482 fifteenperiod;2496 figuredash;2012 filledbox;25A0 filledrect;25AC finalkaf;05DA finalkafdagesh;FB3A finalkafdageshhebrew;FB3A finalkafhebrew;05DA finalkafqamats;05DA 05B8 finalkafqamatshebrew;05DA 05B8 finalkafsheva;05DA 05B0 finalkafshevahebrew;05DA 05B0 finalmem;05DD finalmemhebrew;05DD finalnun;05DF finalnunhebrew;05DF finalpe;05E3 finalpehebrew;05E3 finaltsadi;05E5 finaltsadihebrew;05E5 firsttonechinese;02C9 fisheye;25C9 fitacyrillic;0473 five;0035 fivearabic;0665 fivebengali;09EB fivecircle;2464 fivecircleinversesansserif;278E fivedeva;096B fiveeighths;215D fivegujarati;0AEB fivegurmukhi;0A6B fivehackarabic;0665 fivehangzhou;3025 fiveideographicparen;3224 fiveinferior;2085 fivemonospace;FF15 fiveoldstyle;F735 fiveparen;2478 fiveperiod;248C fivepersian;06F5 fiveroman;2174 fivesuperior;2075 fivethai;0E55 fl;FB02 florin;0192 fmonospace;FF46 fmsquare;3399 fofanthai;0E1F fofathai;0E1D fongmanthai;0E4F forall;2200 four;0034 fourarabic;0664 fourbengali;09EA fourcircle;2463 fourcircleinversesansserif;278D fourdeva;096A fourgujarati;0AEA fourgurmukhi;0A6A fourhackarabic;0664 fourhangzhou;3024 fourideographicparen;3223 fourinferior;2084 fourmonospace;FF14 fournumeratorbengali;09F7 fouroldstyle;F734 fourparen;2477 fourperiod;248B fourpersian;06F4 fourroman;2173 
foursuperior;2074 fourteencircle;246D fourteenparen;2481 fourteenperiod;2495 fourthai;0E54 fourthtonechinese;02CB fparen;24A1 fraction;2044 franc;20A3 g;0067 gabengali;0997 gacute;01F5 gadeva;0917 gafarabic;06AF gaffinalarabic;FB93 gafinitialarabic;FB94 gafmedialarabic;FB95 gagujarati;0A97 gagurmukhi;0A17 gahiragana;304C gakatakana;30AC gamma;03B3 gammalatinsmall;0263 gammasuperior;02E0 gangiacoptic;03EB gbopomofo;310D gbreve;011F gcaron;01E7 gcedilla;0123 gcircle;24D6 gcircumflex;011D gcommaaccent;0123 gdot;0121 gdotaccent;0121 gecyrillic;0433 gehiragana;3052 gekatakana;30B2 geometricallyequal;2251 gereshaccenthebrew;059C gereshhebrew;05F3 gereshmuqdamhebrew;059D germandbls;00DF gershayimaccenthebrew;059E gershayimhebrew;05F4 getamark;3013 ghabengali;0998 ghadarmenian;0572 ghadeva;0918 ghagujarati;0A98 ghagurmukhi;0A18 ghainarabic;063A ghainfinalarabic;FECE ghaininitialarabic;FECF ghainmedialarabic;FED0 ghemiddlehookcyrillic;0495 ghestrokecyrillic;0493 gheupturncyrillic;0491 ghhadeva;095A ghhagurmukhi;0A5A ghook;0260 ghzsquare;3393 gihiragana;304E gikatakana;30AE gimarmenian;0563 gimel;05D2 gimeldagesh;FB32 gimeldageshhebrew;FB32 gimelhebrew;05D2 gjecyrillic;0453 glottalinvertedstroke;01BE glottalstop;0294 glottalstopinverted;0296 glottalstopmod;02C0 glottalstopreversed;0295 glottalstopreversedmod;02C1 glottalstopreversedsuperior;02E4 glottalstopstroke;02A1 glottalstopstrokereversed;02A2 gmacron;1E21 gmonospace;FF47 gohiragana;3054 gokatakana;30B4 gparen;24A2 gpasquare;33AC gradient;2207 grave;0060 gravebelowcmb;0316 gravecmb;0300 gravecomb;0300 gravedeva;0953 gravelowmod;02CE gravemonospace;FF40 gravetonecmb;0340 greater;003E greaterequal;2265 greaterequalorless;22DB greatermonospace;FF1E greaterorequivalent;2273 greaterorless;2277 greateroverequal;2267 greatersmall;FE65 gscript;0261 gstroke;01E5 guhiragana;3050 guillemotleft;00AB guillemotright;00BB guilsinglleft;2039 guilsinglright;203A gukatakana;30B0 guramusquare;3318 gysquare;33C9 h;0068 
haabkhasiancyrillic;04A9 haaltonearabic;06C1 habengali;09B9 hadescendercyrillic;04B3 hadeva;0939 hagujarati;0AB9 hagurmukhi;0A39 haharabic;062D hahfinalarabic;FEA2 hahinitialarabic;FEA3 hahiragana;306F hahmedialarabic;FEA4 haitusquare;332A hakatakana;30CF hakatakanahalfwidth;FF8A halantgurmukhi;0A4D hamzaarabic;0621 hamzadammaarabic;0621 064F hamzadammatanarabic;0621 064C hamzafathaarabic;0621 064E hamzafathatanarabic;0621 064B hamzalowarabic;0621 hamzalowkasraarabic;0621 0650 hamzalowkasratanarabic;0621 064D hamzasukunarabic;0621 0652 hangulfiller;3164 hardsigncyrillic;044A harpoonleftbarbup;21BC harpoonrightbarbup;21C0 hasquare;33CA hatafpatah;05B2 hatafpatah16;05B2 hatafpatah23;05B2 hatafpatah2f;05B2 hatafpatahhebrew;05B2 hatafpatahnarrowhebrew;05B2 hatafpatahquarterhebrew;05B2 hatafpatahwidehebrew;05B2 hatafqamats;05B3 hatafqamats1b;05B3 hatafqamats28;05B3 hatafqamats34;05B3 hatafqamatshebrew;05B3 hatafqamatsnarrowhebrew;05B3 hatafqamatsquarterhebrew;05B3 hatafqamatswidehebrew;05B3 hatafsegol;05B1 hatafsegol17;05B1 hatafsegol24;05B1 hatafsegol30;05B1 hatafsegolhebrew;05B1 hatafsegolnarrowhebrew;05B1 hatafsegolquarterhebrew;05B1 hatafsegolwidehebrew;05B1 hbar;0127 hbopomofo;310F hbrevebelow;1E2B hcedilla;1E29 hcircle;24D7 hcircumflex;0125 hdieresis;1E27 hdotaccent;1E23 hdotbelow;1E25 he;05D4 heart;2665 heartsuitblack;2665 heartsuitwhite;2661 hedagesh;FB34 hedageshhebrew;FB34 hehaltonearabic;06C1 heharabic;0647 hehebrew;05D4 hehfinalaltonearabic;FBA7 hehfinalalttwoarabic;FEEA hehfinalarabic;FEEA hehhamzaabovefinalarabic;FBA5 hehhamzaaboveisolatedarabic;FBA4 hehinitialaltonearabic;FBA8 hehinitialarabic;FEEB hehiragana;3078 hehmedialaltonearabic;FBA9 hehmedialarabic;FEEC heiseierasquare;337B hekatakana;30D8 hekatakanahalfwidth;FF8D hekutaarusquare;3336 henghook;0267 herutusquare;3339 het;05D7 hethebrew;05D7 hhook;0266 hhooksuperior;02B1 hieuhacirclekorean;327B hieuhaparenkorean;321B hieuhcirclekorean;326D hieuhkorean;314E hieuhparenkorean;320D hihiragana;3072 
hikatakana;30D2 hikatakanahalfwidth;FF8B hiriq;05B4 hiriq14;05B4 hiriq21;05B4 hiriq2d;05B4 hiriqhebrew;05B4 hiriqnarrowhebrew;05B4 hiriqquarterhebrew;05B4 hiriqwidehebrew;05B4 hlinebelow;1E96 hmonospace;FF48 hoarmenian;0570 hohipthai;0E2B hohiragana;307B hokatakana;30DB hokatakanahalfwidth;FF8E holam;05B9 holam19;05B9 holam26;05B9 holam32;05B9 holamhebrew;05B9 holamnarrowhebrew;05B9 holamquarterhebrew;05B9 holamwidehebrew;05B9 honokhukthai;0E2E hookabovecomb;0309 hookcmb;0309 hookpalatalizedbelowcmb;0321 hookretroflexbelowcmb;0322 hoonsquare;3342 horicoptic;03E9 horizontalbar;2015 horncmb;031B hotsprings;2668 house;2302 hparen;24A3 hsuperior;02B0 hturned;0265 huhiragana;3075 huiitosquare;3333 hukatakana;30D5 hukatakanahalfwidth;FF8C hungarumlaut;02DD hungarumlautcmb;030B hv;0195 hyphen;002D hypheninferior;F6E5 hyphenmonospace;FF0D hyphensmall;FE63 hyphensuperior;F6E6 hyphentwo;2010 i;0069 iacute;00ED iacyrillic;044F ibengali;0987 ibopomofo;3127 ibreve;012D icaron;01D0 icircle;24D8 icircumflex;00EE icyrillic;0456 idblgrave;0209 ideographearthcircle;328F ideographfirecircle;328B ideographicallianceparen;323F ideographiccallparen;323A ideographiccentrecircle;32A5 ideographicclose;3006 ideographiccomma;3001 ideographiccommaleft;FF64 ideographiccongratulationparen;3237 ideographiccorrectcircle;32A3 ideographicearthparen;322F ideographicenterpriseparen;323D ideographicexcellentcircle;329D ideographicfestivalparen;3240 ideographicfinancialcircle;3296 ideographicfinancialparen;3236 ideographicfireparen;322B ideographichaveparen;3232 ideographichighcircle;32A4 ideographiciterationmark;3005 ideographiclaborcircle;3298 ideographiclaborparen;3238 ideographicleftcircle;32A7 ideographiclowcircle;32A6 ideographicmedicinecircle;32A9 ideographicmetalparen;322E ideographicmoonparen;322A ideographicnameparen;3234 ideographicperiod;3002 ideographicprintcircle;329E ideographicreachparen;3243 ideographicrepresentparen;3239 ideographicresourceparen;323E ideographicrightcircle;32A8 
ideographicsecretcircle;3299 ideographicselfparen;3242 ideographicsocietyparen;3233 ideographicspace;3000 ideographicspecialparen;3235 ideographicstockparen;3231 ideographicstudyparen;323B ideographicsunparen;3230 ideographicsuperviseparen;323C ideographicwaterparen;322C ideographicwoodparen;322D ideographiczero;3007 ideographmetalcircle;328E ideographmooncircle;328A ideographnamecircle;3294 ideographsuncircle;3290 ideographwatercircle;328C ideographwoodcircle;328D ideva;0907 idieresis;00EF idieresisacute;1E2F idieresiscyrillic;04E5 idotbelow;1ECB iebrevecyrillic;04D7 iecyrillic;0435 ieungacirclekorean;3275 ieungaparenkorean;3215 ieungcirclekorean;3267 ieungkorean;3147 ieungparenkorean;3207 igrave;00EC igujarati;0A87 igurmukhi;0A07 ihiragana;3044 ihookabove;1EC9 iibengali;0988 iicyrillic;0438 iideva;0908 iigujarati;0A88 iigurmukhi;0A08 iimatragurmukhi;0A40 iinvertedbreve;020B iishortcyrillic;0439 iivowelsignbengali;09C0 iivowelsigndeva;0940 iivowelsigngujarati;0AC0 ij;0133 ikatakana;30A4 ikatakanahalfwidth;FF72 ikorean;3163 ilde;02DC iluyhebrew;05AC imacron;012B imacroncyrillic;04E3 imageorapproximatelyequal;2253 imatragurmukhi;0A3F imonospace;FF49 increment;2206 infinity;221E iniarmenian;056B integral;222B integralbottom;2321 integralbt;2321 integralex;F8F5 integraltop;2320 integraltp;2320 intersection;2229 intisquare;3305 invbullet;25D8 invcircle;25D9 invsmileface;263B iocyrillic;0451 iogonek;012F iota;03B9 iotadieresis;03CA iotadieresistonos;0390 iotalatin;0269 iotatonos;03AF iparen;24A4 irigurmukhi;0A72 ismallhiragana;3043 ismallkatakana;30A3 ismallkatakanahalfwidth;FF68 issharbengali;09FA istroke;0268 isuperior;F6ED iterationhiragana;309D iterationkatakana;30FD itilde;0129 itildebelow;1E2D iubopomofo;3129 iucyrillic;044E ivowelsignbengali;09BF ivowelsigndeva;093F ivowelsigngujarati;0ABF izhitsacyrillic;0475 izhitsadblgravecyrillic;0477 j;006A jaarmenian;0571 jabengali;099C jadeva;091C jagujarati;0A9C jagurmukhi;0A1C jbopomofo;3110 jcaron;01F0 jcircle;24D9 
jcircumflex;0135 jcrossedtail;029D jdotlessstroke;025F jecyrillic;0458 jeemarabic;062C jeemfinalarabic;FE9E jeeminitialarabic;FE9F jeemmedialarabic;FEA0 jeharabic;0698 jehfinalarabic;FB8B jhabengali;099D jhadeva;091D jhagujarati;0A9D jhagurmukhi;0A1D jheharmenian;057B jis;3004 jmonospace;FF4A jparen;24A5 jsuperior;02B2 k;006B kabashkircyrillic;04A1 kabengali;0995 kacute;1E31 kacyrillic;043A kadescendercyrillic;049B kadeva;0915 kaf;05DB kafarabic;0643 kafdagesh;FB3B kafdageshhebrew;FB3B kaffinalarabic;FEDA kafhebrew;05DB kafinitialarabic;FEDB kafmedialarabic;FEDC kafrafehebrew;FB4D kagujarati;0A95 kagurmukhi;0A15 kahiragana;304B kahookcyrillic;04C4 kakatakana;30AB kakatakanahalfwidth;FF76 kappa;03BA kappasymbolgreek;03F0 kapyeounmieumkorean;3171 kapyeounphieuphkorean;3184 kapyeounpieupkorean;3178 kapyeounssangpieupkorean;3179 karoriisquare;330D kashidaautoarabic;0640 kashidaautonosidebearingarabic;0640 kasmallkatakana;30F5 kasquare;3384 kasraarabic;0650 kasratanarabic;064D kastrokecyrillic;049F katahiraprolongmarkhalfwidth;FF70 kaverticalstrokecyrillic;049D kbopomofo;310E kcalsquare;3389 kcaron;01E9 kcedilla;0137 kcircle;24DA kcommaaccent;0137 kdotbelow;1E33 keharmenian;0584 kehiragana;3051 kekatakana;30B1 kekatakanahalfwidth;FF79 kenarmenian;056F kesmallkatakana;30F6 kgreenlandic;0138 khabengali;0996 khacyrillic;0445 khadeva;0916 khagujarati;0A96 khagurmukhi;0A16 khaharabic;062E khahfinalarabic;FEA6 khahinitialarabic;FEA7 khahmedialarabic;FEA8 kheicoptic;03E7 khhadeva;0959 khhagurmukhi;0A59 khieukhacirclekorean;3278 khieukhaparenkorean;3218 khieukhcirclekorean;326A khieukhkorean;314B khieukhparenkorean;320A khokhaithai;0E02 khokhonthai;0E05 khokhuatthai;0E03 khokhwaithai;0E04 khomutthai;0E5B khook;0199 khorakhangthai;0E06 khzsquare;3391 kihiragana;304D kikatakana;30AD kikatakanahalfwidth;FF77 kiroguramusquare;3315 kiromeetorusquare;3316 kirosquare;3314 kiyeokacirclekorean;326E kiyeokaparenkorean;320E kiyeokcirclekorean;3260 kiyeokkorean;3131 kiyeokparenkorean;3200 
kiyeoksioskorean;3133 kjecyrillic;045C klinebelow;1E35 klsquare;3398 kmcubedsquare;33A6 kmonospace;FF4B kmsquaredsquare;33A2 kohiragana;3053 kohmsquare;33C0 kokaithai;0E01 kokatakana;30B3 kokatakanahalfwidth;FF7A kooposquare;331E koppacyrillic;0481 koreanstandardsymbol;327F koroniscmb;0343 kparen;24A6 kpasquare;33AA ksicyrillic;046F ktsquare;33CF kturned;029E kuhiragana;304F kukatakana;30AF kukatakanahalfwidth;FF78 kvsquare;33B8 kwsquare;33BE l;006C labengali;09B2 lacute;013A ladeva;0932 lagujarati;0AB2 lagurmukhi;0A32 lakkhangyaothai;0E45 lamaleffinalarabic;FEFC lamalefhamzaabovefinalarabic;FEF8 lamalefhamzaaboveisolatedarabic;FEF7 lamalefhamzabelowfinalarabic;FEFA lamalefhamzabelowisolatedarabic;FEF9 lamalefisolatedarabic;FEFB lamalefmaddaabovefinalarabic;FEF6 lamalefmaddaaboveisolatedarabic;FEF5 lamarabic;0644 lambda;03BB lambdastroke;019B lamed;05DC lameddagesh;FB3C lameddageshhebrew;FB3C lamedhebrew;05DC lamedholam;05DC 05B9 lamedholamdagesh;05DC 05B9 05BC lamedholamdageshhebrew;05DC 05B9 05BC lamedholamhebrew;05DC 05B9 lamfinalarabic;FEDE lamhahinitialarabic;FCCA laminitialarabic;FEDF lamjeeminitialarabic;FCC9 lamkhahinitialarabic;FCCB lamlamhehisolatedarabic;FDF2 lammedialarabic;FEE0 lammeemhahinitialarabic;FD88 lammeeminitialarabic;FCCC lammeemjeeminitialarabic;FEDF FEE4 FEA0 lammeemkhahinitialarabic;FEDF FEE4 FEA8 largecircle;25EF lbar;019A lbelt;026C lbopomofo;310C lcaron;013E lcedilla;013C lcircle;24DB lcircumflexbelow;1E3D lcommaaccent;013C ldot;0140 ldotaccent;0140 ldotbelow;1E37 ldotbelowmacron;1E39 leftangleabovecmb;031A lefttackbelowcmb;0318 less;003C lessequal;2264 lessequalorgreater;22DA lessmonospace;FF1C lessorequivalent;2272 lessorgreater;2276 lessoverequal;2266 lesssmall;FE64 lezh;026E lfblock;258C lhookretroflex;026D lira;20A4 liwnarmenian;056C lj;01C9 ljecyrillic;0459 ll;F6C0 lladeva;0933 llagujarati;0AB3 llinebelow;1E3B llladeva;0934 llvocalicbengali;09E1 llvocalicdeva;0961 llvocalicvowelsignbengali;09E3 llvocalicvowelsigndeva;0963 
lmiddletilde;026B lmonospace;FF4C lmsquare;33D0 lochulathai;0E2C logicaland;2227 logicalnot;00AC logicalnotreversed;2310 logicalor;2228 lolingthai;0E25 longs;017F lowlinecenterline;FE4E lowlinecmb;0332 lowlinedashed;FE4D lozenge;25CA lparen;24A7 lslash;0142 lsquare;2113 lsuperior;F6EE ltshade;2591 luthai;0E26 lvocalicbengali;098C lvocalicdeva;090C lvocalicvowelsignbengali;09E2 lvocalicvowelsigndeva;0962 lxsquare;33D3 m;006D mabengali;09AE macron;00AF macronbelowcmb;0331 macroncmb;0304 macronlowmod;02CD macronmonospace;FFE3 macute;1E3F madeva;092E magujarati;0AAE magurmukhi;0A2E mahapakhhebrew;05A4 mahapakhlefthebrew;05A4 mahiragana;307E maichattawalowleftthai;F895 maichattawalowrightthai;F894 maichattawathai;0E4B maichattawaupperleftthai;F893 maieklowleftthai;F88C maieklowrightthai;F88B maiekthai;0E48 maiekupperleftthai;F88A maihanakatleftthai;F884 maihanakatthai;0E31 maitaikhuleftthai;F889 maitaikhuthai;0E47 maitholowleftthai;F88F maitholowrightthai;F88E maithothai;0E49 maithoupperleftthai;F88D maitrilowleftthai;F892 maitrilowrightthai;F891 maitrithai;0E4A maitriupperleftthai;F890 maiyamokthai;0E46 makatakana;30DE makatakanahalfwidth;FF8F male;2642 mansyonsquare;3347 maqafhebrew;05BE mars;2642 masoracirclehebrew;05AF masquare;3383 mbopomofo;3107 mbsquare;33D4 mcircle;24DC mcubedsquare;33A5 mdotaccent;1E41 mdotbelow;1E43 meemarabic;0645 meemfinalarabic;FEE2 meeminitialarabic;FEE3 meemmedialarabic;FEE4 meemmeeminitialarabic;FCD1 meemmeemisolatedarabic;FC48 meetorusquare;334D mehiragana;3081 meizierasquare;337E mekatakana;30E1 mekatakanahalfwidth;FF92 mem;05DE memdagesh;FB3E memdageshhebrew;FB3E memhebrew;05DE menarmenian;0574 merkhahebrew;05A5 merkhakefulahebrew;05A6 merkhakefulalefthebrew;05A6 merkhalefthebrew;05A5 mhook;0271 mhzsquare;3392 middledotkatakanahalfwidth;FF65 middot;00B7 mieumacirclekorean;3272 mieumaparenkorean;3212 mieumcirclekorean;3264 mieumkorean;3141 mieumpansioskorean;3170 mieumparenkorean;3204 mieumpieupkorean;316E mieumsioskorean;316F 
mihiragana;307F mikatakana;30DF mikatakanahalfwidth;FF90 minus;2212 minusbelowcmb;0320 minuscircle;2296 minusmod;02D7 minusplus;2213 minute;2032 miribaarusquare;334A mirisquare;3349 mlonglegturned;0270 mlsquare;3396 mmcubedsquare;33A3 mmonospace;FF4D mmsquaredsquare;339F mohiragana;3082 mohmsquare;33C1 mokatakana;30E2 mokatakanahalfwidth;FF93 molsquare;33D6 momathai;0E21 moverssquare;33A7 moverssquaredsquare;33A8 mparen;24A8 mpasquare;33AB mssquare;33B3 msuperior;F6EF mturned;026F mu;00B5 mu1;00B5 muasquare;3382 muchgreater;226B muchless;226A mufsquare;338C mugreek;03BC mugsquare;338D muhiragana;3080 mukatakana;30E0 mukatakanahalfwidth;FF91 mulsquare;3395 multiply;00D7 mumsquare;339B munahhebrew;05A3 munahlefthebrew;05A3 musicalnote;266A musicalnotedbl;266B musicflatsign;266D musicsharpsign;266F mussquare;33B2 muvsquare;33B6 muwsquare;33BC mvmegasquare;33B9 mvsquare;33B7 mwmegasquare;33BF mwsquare;33BD n;006E nabengali;09A8 nabla;2207 nacute;0144 nadeva;0928 nagujarati;0AA8 nagurmukhi;0A28 nahiragana;306A nakatakana;30CA nakatakanahalfwidth;FF85 napostrophe;0149 nasquare;3381 nbopomofo;310B nbspace;00A0 ncaron;0148 ncedilla;0146 ncircle;24DD ncircumflexbelow;1E4B ncommaaccent;0146 ndotaccent;1E45 ndotbelow;1E47 nehiragana;306D nekatakana;30CD nekatakanahalfwidth;FF88 newsheqelsign;20AA nfsquare;338B ngabengali;0999 ngadeva;0919 ngagujarati;0A99 ngagurmukhi;0A19 ngonguthai;0E07 nhiragana;3093 nhookleft;0272 nhookretroflex;0273 nieunacirclekorean;326F nieunaparenkorean;320F nieuncieuckorean;3135 nieuncirclekorean;3261 nieunhieuhkorean;3136 nieunkorean;3134 nieunpansioskorean;3168 nieunparenkorean;3201 nieunsioskorean;3167 nieuntikeutkorean;3166 nihiragana;306B nikatakana;30CB nikatakanahalfwidth;FF86 nikhahitleftthai;F899 nikhahitthai;0E4D nine;0039 ninearabic;0669 ninebengali;09EF ninecircle;2468 ninecircleinversesansserif;2792 ninedeva;096F ninegujarati;0AEF ninegurmukhi;0A6F ninehackarabic;0669 ninehangzhou;3029 nineideographicparen;3228 nineinferior;2089 
ninemonospace;FF19 nineoldstyle;F739 nineparen;247C nineperiod;2490 ninepersian;06F9 nineroman;2178 ninesuperior;2079 nineteencircle;2472 nineteenparen;2486 nineteenperiod;249A ninethai;0E59 nj;01CC njecyrillic;045A nkatakana;30F3 nkatakanahalfwidth;FF9D nlegrightlong;019E nlinebelow;1E49 nmonospace;FF4E nmsquare;339A nnabengali;09A3 nnadeva;0923 nnagujarati;0AA3 nnagurmukhi;0A23 nnnadeva;0929 nohiragana;306E nokatakana;30CE nokatakanahalfwidth;FF89 nonbreakingspace;00A0 nonenthai;0E13 nonuthai;0E19 noonarabic;0646 noonfinalarabic;FEE6 noonghunnaarabic;06BA noonghunnafinalarabic;FB9F noonhehinitialarabic;FEE7 FEEC nooninitialarabic;FEE7 noonjeeminitialarabic;FCD2 noonjeemisolatedarabic;FC4B noonmedialarabic;FEE8 noonmeeminitialarabic;FCD5 noonmeemisolatedarabic;FC4E noonnoonfinalarabic;FC8D notcontains;220C notelement;2209 notelementof;2209 notequal;2260 notgreater;226F notgreaternorequal;2271 notgreaternorless;2279 notidentical;2262 notless;226E notlessnorequal;2270 notparallel;2226 notprecedes;2280 notsubset;2284 notsucceeds;2281 notsuperset;2285 nowarmenian;0576 nparen;24A9 nssquare;33B1 nsuperior;207F ntilde;00F1 nu;03BD nuhiragana;306C nukatakana;30CC nukatakanahalfwidth;FF87 nuktabengali;09BC nuktadeva;093C nuktagujarati;0ABC nuktagurmukhi;0A3C numbersign;0023 numbersignmonospace;FF03 numbersignsmall;FE5F numeralsigngreek;0374 numeralsignlowergreek;0375 numero;2116 nun;05E0 nundagesh;FB40 nundageshhebrew;FB40 nunhebrew;05E0 nvsquare;33B5 nwsquare;33BB nyabengali;099E nyadeva;091E nyagujarati;0A9E nyagurmukhi;0A1E o;006F oacute;00F3 oangthai;0E2D obarred;0275 obarredcyrillic;04E9 obarreddieresiscyrillic;04EB obengali;0993 obopomofo;311B obreve;014F ocandradeva;0911 ocandragujarati;0A91 ocandravowelsigndeva;0949 ocandravowelsigngujarati;0AC9 ocaron;01D2 ocircle;24DE ocircumflex;00F4 ocircumflexacute;1ED1 ocircumflexdotbelow;1ED9 ocircumflexgrave;1ED3 ocircumflexhookabove;1ED5 ocircumflextilde;1ED7 ocyrillic;043E odblacute;0151 odblgrave;020D odeva;0913 
odieresis;00F6 odieresiscyrillic;04E7 odotbelow;1ECD oe;0153 oekorean;315A ogonek;02DB ogonekcmb;0328 ograve;00F2 ogujarati;0A93 oharmenian;0585 ohiragana;304A ohookabove;1ECF ohorn;01A1 ohornacute;1EDB ohorndotbelow;1EE3 ohorngrave;1EDD ohornhookabove;1EDF ohorntilde;1EE1 ohungarumlaut;0151 oi;01A3 oinvertedbreve;020F okatakana;30AA okatakanahalfwidth;FF75 okorean;3157 olehebrew;05AB omacron;014D omacronacute;1E53 omacrongrave;1E51 omdeva;0950 omega;03C9 omega1;03D6 omegacyrillic;0461 omegalatinclosed;0277 omegaroundcyrillic;047B omegatitlocyrillic;047D omegatonos;03CE omgujarati;0AD0 omicron;03BF omicrontonos;03CC omonospace;FF4F one;0031 onearabic;0661 onebengali;09E7 onecircle;2460 onecircleinversesansserif;278A onedeva;0967 onedotenleader;2024 oneeighth;215B onefitted;F6DC onegujarati;0AE7 onegurmukhi;0A67 onehackarabic;0661 onehalf;00BD onehangzhou;3021 oneideographicparen;3220 oneinferior;2081 onemonospace;FF11 onenumeratorbengali;09F4 oneoldstyle;F731 oneparen;2474 oneperiod;2488 onepersian;06F1 onequarter;00BC oneroman;2170 onesuperior;00B9 onethai;0E51 onethird;2153 oogonek;01EB oogonekmacron;01ED oogurmukhi;0A13 oomatragurmukhi;0A4B oopen;0254 oparen;24AA openbullet;25E6 option;2325 ordfeminine;00AA ordmasculine;00BA orthogonal;221F oshortdeva;0912 oshortvowelsigndeva;094A oslash;00F8 oslashacute;01FF osmallhiragana;3049 osmallkatakana;30A9 osmallkatakanahalfwidth;FF6B ostrokeacute;01FF osuperior;F6F0 otcyrillic;047F otilde;00F5 otildeacute;1E4D otildedieresis;1E4F oubopomofo;3121 overline;203E overlinecenterline;FE4A overlinecmb;0305 overlinedashed;FE49 overlinedblwavy;FE4C overlinewavy;FE4B overscore;00AF ovowelsignbengali;09CB ovowelsigndeva;094B ovowelsigngujarati;0ACB p;0070 paampssquare;3380 paasentosquare;332B pabengali;09AA pacute;1E55 padeva;092A pagedown;21DF pageup;21DE pagujarati;0AAA pagurmukhi;0A2A pahiragana;3071 paiyannoithai;0E2F pakatakana;30D1 palatalizationcyrilliccmb;0484 palochkacyrillic;04C0 pansioskorean;317F paragraph;00B6 
parallel;2225 parenleft;0028 parenleftaltonearabic;FD3E parenleftbt;F8ED parenleftex;F8EC parenleftinferior;208D parenleftmonospace;FF08 parenleftsmall;FE59 parenleftsuperior;207D parenlefttp;F8EB parenleftvertical;FE35 parenright;0029 parenrightaltonearabic;FD3F parenrightbt;F8F8 parenrightex;F8F7 parenrightinferior;208E parenrightmonospace;FF09 parenrightsmall;FE5A parenrightsuperior;207E parenrighttp;F8F6 parenrightvertical;FE36 partialdiff;2202 paseqhebrew;05C0 pashtahebrew;0599 pasquare;33A9 patah;05B7 patah11;05B7 patah1d;05B7 patah2a;05B7 patahhebrew;05B7 patahnarrowhebrew;05B7 patahquarterhebrew;05B7 patahwidehebrew;05B7 pazerhebrew;05A1 pbopomofo;3106 pcircle;24DF pdotaccent;1E57 pe;05E4 pecyrillic;043F pedagesh;FB44 pedageshhebrew;FB44 peezisquare;333B pefinaldageshhebrew;FB43 peharabic;067E peharmenian;057A pehebrew;05E4 pehfinalarabic;FB57 pehinitialarabic;FB58 pehiragana;307A pehmedialarabic;FB59 pekatakana;30DA pemiddlehookcyrillic;04A7 perafehebrew;FB4E percent;0025 percentarabic;066A percentmonospace;FF05 percentsmall;FE6A period;002E periodarmenian;0589 periodcentered;00B7 periodhalfwidth;FF61 periodinferior;F6E7 periodmonospace;FF0E periodsmall;FE52 periodsuperior;F6E8 perispomenigreekcmb;0342 perpendicular;22A5 perthousand;2030 peseta;20A7 pfsquare;338A phabengali;09AB phadeva;092B phagujarati;0AAB phagurmukhi;0A2B phi;03C6 phi1;03D5 phieuphacirclekorean;327A phieuphaparenkorean;321A phieuphcirclekorean;326C phieuphkorean;314D phieuphparenkorean;320C philatin;0278 phinthuthai;0E3A phisymbolgreek;03D5 phook;01A5 phophanthai;0E1E phophungthai;0E1C phosamphaothai;0E20 pi;03C0 pieupacirclekorean;3273 pieupaparenkorean;3213 pieupcieuckorean;3176 pieupcirclekorean;3265 pieupkiyeokkorean;3172 pieupkorean;3142 pieupparenkorean;3205 pieupsioskiyeokkorean;3174 pieupsioskorean;3144 pieupsiostikeutkorean;3175 pieupthieuthkorean;3177 pieuptikeutkorean;3173 pihiragana;3074 pikatakana;30D4 pisymbolgreek;03D6 piwrarmenian;0583 plus;002B plusbelowcmb;031F 
pluscircle;2295 plusminus;00B1 plusmod;02D6 plusmonospace;FF0B plussmall;FE62 plussuperior;207A pmonospace;FF50 pmsquare;33D8 pohiragana;307D pointingindexdownwhite;261F pointingindexleftwhite;261C pointingindexrightwhite;261E pointingindexupwhite;261D pokatakana;30DD poplathai;0E1B postalmark;3012 postalmarkface;3020 pparen;24AB precedes;227A prescription;211E primemod;02B9 primereversed;2035 product;220F projective;2305 prolongedkana;30FC propellor;2318 propersubset;2282 propersuperset;2283 proportion;2237 proportional;221D psi;03C8 psicyrillic;0471 psilipneumatacyrilliccmb;0486 pssquare;33B0 puhiragana;3077 pukatakana;30D7 pvsquare;33B4 pwsquare;33BA q;0071 qadeva;0958 qadmahebrew;05A8 qafarabic;0642 qaffinalarabic;FED6 qafinitialarabic;FED7 qafmedialarabic;FED8 qamats;05B8 qamats10;05B8 qamats1a;05B8 qamats1c;05B8 qamats27;05B8 qamats29;05B8 qamats33;05B8 qamatsde;05B8 qamatshebrew;05B8 qamatsnarrowhebrew;05B8 qamatsqatanhebrew;05B8 qamatsqatannarrowhebrew;05B8 qamatsqatanquarterhebrew;05B8 qamatsqatanwidehebrew;05B8 qamatsquarterhebrew;05B8 qamatswidehebrew;05B8 qarneyparahebrew;059F qbopomofo;3111 qcircle;24E0 qhook;02A0 qmonospace;FF51 qof;05E7 qofdagesh;FB47 qofdageshhebrew;FB47 qofhatafpatah;05E7 05B2 qofhatafpatahhebrew;05E7 05B2 qofhatafsegol;05E7 05B1 qofhatafsegolhebrew;05E7 05B1 qofhebrew;05E7 qofhiriq;05E7 05B4 qofhiriqhebrew;05E7 05B4 qofholam;05E7 05B9 qofholamhebrew;05E7 05B9 qofpatah;05E7 05B7 qofpatahhebrew;05E7 05B7 qofqamats;05E7 05B8 qofqamatshebrew;05E7 05B8 qofqubuts;05E7 05BB qofqubutshebrew;05E7 05BB qofsegol;05E7 05B6 qofsegolhebrew;05E7 05B6 qofsheva;05E7 05B0 qofshevahebrew;05E7 05B0 qoftsere;05E7 05B5 qoftserehebrew;05E7 05B5 qparen;24AC quarternote;2669 qubuts;05BB qubuts18;05BB qubuts25;05BB qubuts31;05BB qubutshebrew;05BB qubutsnarrowhebrew;05BB qubutsquarterhebrew;05BB qubutswidehebrew;05BB question;003F questionarabic;061F questionarmenian;055E questiondown;00BF questiondownsmall;F7BF questiongreek;037E questionmonospace;FF1F 
questionsmall;F73F quotedbl;0022 quotedblbase;201E quotedblleft;201C quotedblmonospace;FF02 quotedblprime;301E quotedblprimereversed;301D quotedblright;201D quoteleft;2018 quoteleftreversed;201B quotereversed;201B quoteright;2019 quoterightn;0149 quotesinglbase;201A quotesingle;0027 quotesinglemonospace;FF07 r;0072 raarmenian;057C rabengali;09B0 racute;0155 radeva;0930 radical;221A radicalex;F8E5 radoverssquare;33AE radoverssquaredsquare;33AF radsquare;33AD rafe;05BF rafehebrew;05BF ragujarati;0AB0 ragurmukhi;0A30 rahiragana;3089 rakatakana;30E9 rakatakanahalfwidth;FF97 ralowerdiagonalbengali;09F1 ramiddlediagonalbengali;09F0 ramshorn;0264 ratio;2236 rbopomofo;3116 rcaron;0159 rcedilla;0157 rcircle;24E1 rcommaaccent;0157 rdblgrave;0211 rdotaccent;1E59 rdotbelow;1E5B rdotbelowmacron;1E5D referencemark;203B reflexsubset;2286 reflexsuperset;2287 registered;00AE registersans;F8E8 registerserif;F6DA reharabic;0631 reharmenian;0580 rehfinalarabic;FEAE rehiragana;308C rehyehaleflamarabic;0631 FEF3 FE8E 0644 rekatakana;30EC rekatakanahalfwidth;FF9A resh;05E8 reshdageshhebrew;FB48 reshhatafpatah;05E8 05B2 reshhatafpatahhebrew;05E8 05B2 reshhatafsegol;05E8 05B1 reshhatafsegolhebrew;05E8 05B1 reshhebrew;05E8 reshhiriq;05E8 05B4 reshhiriqhebrew;05E8 05B4 reshholam;05E8 05B9 reshholamhebrew;05E8 05B9 reshpatah;05E8 05B7 reshpatahhebrew;05E8 05B7 reshqamats;05E8 05B8 reshqamatshebrew;05E8 05B8 reshqubuts;05E8 05BB reshqubutshebrew;05E8 05BB reshsegol;05E8 05B6 reshsegolhebrew;05E8 05B6 reshsheva;05E8 05B0 reshshevahebrew;05E8 05B0 reshtsere;05E8 05B5 reshtserehebrew;05E8 05B5 reversedtilde;223D reviahebrew;0597 reviamugrashhebrew;0597 revlogicalnot;2310 rfishhook;027E rfishhookreversed;027F rhabengali;09DD rhadeva;095D rho;03C1 rhook;027D rhookturned;027B rhookturnedsuperior;02B5 rhosymbolgreek;03F1 rhotichookmod;02DE rieulacirclekorean;3271 rieulaparenkorean;3211 rieulcirclekorean;3263 rieulhieuhkorean;3140 rieulkiyeokkorean;313A rieulkiyeoksioskorean;3169 rieulkorean;3139 
rieulmieumkorean;313B rieulpansioskorean;316C rieulparenkorean;3203 rieulphieuphkorean;313F rieulpieupkorean;313C rieulpieupsioskorean;316B rieulsioskorean;313D rieulthieuthkorean;313E rieultikeutkorean;316A rieulyeorinhieuhkorean;316D rightangle;221F righttackbelowcmb;0319 righttriangle;22BF rihiragana;308A rikatakana;30EA rikatakanahalfwidth;FF98 ring;02DA ringbelowcmb;0325 ringcmb;030A ringhalfleft;02BF ringhalfleftarmenian;0559 ringhalfleftbelowcmb;031C ringhalfleftcentered;02D3 ringhalfright;02BE ringhalfrightbelowcmb;0339 ringhalfrightcentered;02D2 rinvertedbreve;0213 rittorusquare;3351 rlinebelow;1E5F rlongleg;027C rlonglegturned;027A rmonospace;FF52 rohiragana;308D rokatakana;30ED rokatakanahalfwidth;FF9B roruathai;0E23 rparen;24AD rrabengali;09DC rradeva;0931 rragurmukhi;0A5C rreharabic;0691 rrehfinalarabic;FB8D rrvocalicbengali;09E0 rrvocalicdeva;0960 rrvocalicgujarati;0AE0 rrvocalicvowelsignbengali;09C4 rrvocalicvowelsigndeva;0944 rrvocalicvowelsigngujarati;0AC4 rsuperior;F6F1 rtblock;2590 rturned;0279 rturnedsuperior;02B4 ruhiragana;308B rukatakana;30EB rukatakanahalfwidth;FF99 rupeemarkbengali;09F2 rupeesignbengali;09F3 rupiah;F6DD ruthai;0E24 rvocalicbengali;098B rvocalicdeva;090B rvocalicgujarati;0A8B rvocalicvowelsignbengali;09C3 rvocalicvowelsigndeva;0943 rvocalicvowelsigngujarati;0AC3 s;0073 sabengali;09B8 sacute;015B sacutedotaccent;1E65 sadarabic;0635 sadeva;0938 sadfinalarabic;FEBA sadinitialarabic;FEBB sadmedialarabic;FEBC sagujarati;0AB8 sagurmukhi;0A38 sahiragana;3055 sakatakana;30B5 sakatakanahalfwidth;FF7B sallallahoualayhewasallamarabic;FDFA samekh;05E1 samekhdagesh;FB41 samekhdageshhebrew;FB41 samekhhebrew;05E1 saraaathai;0E32 saraaethai;0E41 saraaimaimalaithai;0E44 saraaimaimuanthai;0E43 saraamthai;0E33 saraathai;0E30 saraethai;0E40 saraiileftthai;F886 saraiithai;0E35 saraileftthai;F885 saraithai;0E34 saraothai;0E42 saraueeleftthai;F888 saraueethai;0E37 saraueleftthai;F887 sarauethai;0E36 sarauthai;0E38 sarauuthai;0E39 sbopomofo;3119 
scaron;0161 scarondotaccent;1E67 scedilla;015F schwa;0259 schwacyrillic;04D9 schwadieresiscyrillic;04DB schwahook;025A scircle;24E2 scircumflex;015D scommaaccent;0219 sdotaccent;1E61 sdotbelow;1E63 sdotbelowdotaccent;1E69 seagullbelowcmb;033C second;2033 secondtonechinese;02CA section;00A7 seenarabic;0633 seenfinalarabic;FEB2 seeninitialarabic;FEB3 seenmedialarabic;FEB4 segol;05B6 segol13;05B6 segol1f;05B6 segol2c;05B6 segolhebrew;05B6 segolnarrowhebrew;05B6 segolquarterhebrew;05B6 segoltahebrew;0592 segolwidehebrew;05B6 seharmenian;057D sehiragana;305B sekatakana;30BB sekatakanahalfwidth;FF7E semicolon;003B semicolonarabic;061B semicolonmonospace;FF1B semicolonsmall;FE54 semivoicedmarkkana;309C semivoicedmarkkanahalfwidth;FF9F sentisquare;3322 sentosquare;3323 seven;0037 sevenarabic;0667 sevenbengali;09ED sevencircle;2466 sevencircleinversesansserif;2790 sevendeva;096D seveneighths;215E sevengujarati;0AED sevengurmukhi;0A6D sevenhackarabic;0667 sevenhangzhou;3027 sevenideographicparen;3226 seveninferior;2087 sevenmonospace;FF17 sevenoldstyle;F737 sevenparen;247A sevenperiod;248E sevenpersian;06F7 sevenroman;2176 sevensuperior;2077 seventeencircle;2470 seventeenparen;2484 seventeenperiod;2498 seventhai;0E57 sfthyphen;00AD shaarmenian;0577 shabengali;09B6 shacyrillic;0448 shaddaarabic;0651 shaddadammaarabic;FC61 shaddadammatanarabic;FC5E shaddafathaarabic;FC60 shaddafathatanarabic;0651 064B shaddakasraarabic;FC62 shaddakasratanarabic;FC5F shade;2592 shadedark;2593 shadelight;2591 shademedium;2592 shadeva;0936 shagujarati;0AB6 shagurmukhi;0A36 shalshelethebrew;0593 shbopomofo;3115 shchacyrillic;0449 sheenarabic;0634 sheenfinalarabic;FEB6 sheeninitialarabic;FEB7 sheenmedialarabic;FEB8 sheicoptic;03E3 sheqel;20AA sheqelhebrew;20AA sheva;05B0 sheva115;05B0 sheva15;05B0 sheva22;05B0 sheva2e;05B0 shevahebrew;05B0 shevanarrowhebrew;05B0 shevaquarterhebrew;05B0 shevawidehebrew;05B0 shhacyrillic;04BB shimacoptic;03ED shin;05E9 shindagesh;FB49 shindageshhebrew;FB49 
shindageshshindot;FB2C shindageshshindothebrew;FB2C shindageshsindot;FB2D shindageshsindothebrew;FB2D shindothebrew;05C1 shinhebrew;05E9 shinshindot;FB2A shinshindothebrew;FB2A shinsindot;FB2B shinsindothebrew;FB2B shook;0282 sigma;03C3 sigma1;03C2 sigmafinal;03C2 sigmalunatesymbolgreek;03F2 sihiragana;3057 sikatakana;30B7 sikatakanahalfwidth;FF7C siluqhebrew;05BD siluqlefthebrew;05BD similar;223C sindothebrew;05C2 siosacirclekorean;3274 siosaparenkorean;3214 sioscieuckorean;317E sioscirclekorean;3266 sioskiyeokkorean;317A sioskorean;3145 siosnieunkorean;317B siosparenkorean;3206 siospieupkorean;317D siostikeutkorean;317C six;0036 sixarabic;0666 sixbengali;09EC sixcircle;2465 sixcircleinversesansserif;278F sixdeva;096C sixgujarati;0AEC sixgurmukhi;0A6C sixhackarabic;0666 sixhangzhou;3026 sixideographicparen;3225 sixinferior;2086 sixmonospace;FF16 sixoldstyle;F736 sixparen;2479 sixperiod;248D sixpersian;06F6 sixroman;2175 sixsuperior;2076 sixteencircle;246F sixteencurrencydenominatorbengali;09F9 sixteenparen;2483 sixteenperiod;2497 sixthai;0E56 slash;002F slashmonospace;FF0F slong;017F slongdotaccent;1E9B smileface;263A smonospace;FF53 sofpasuqhebrew;05C3 softhyphen;00AD softsigncyrillic;044C sohiragana;305D sokatakana;30BD sokatakanahalfwidth;FF7F soliduslongoverlaycmb;0338 solidusshortoverlaycmb;0337 sorusithai;0E29 sosalathai;0E28 sosothai;0E0B sosuathai;0E2A space;0020 spacehackarabic;0020 spade;2660 spadesuitblack;2660 spadesuitwhite;2664 sparen;24AE squarebelowcmb;033B squarecc;33C4 squarecm;339D squarediagonalcrosshatchfill;25A9 squarehorizontalfill;25A4 squarekg;338F squarekm;339E squarekmcapital;33CE squareln;33D1 squarelog;33D2 squaremg;338E squaremil;33D5 squaremm;339C squaremsquared;33A1 squareorthogonalcrosshatchfill;25A6 squareupperlefttolowerrightfill;25A7 squareupperrighttolowerleftfill;25A8 squareverticalfill;25A5 squarewhitewithsmallblack;25A3 srsquare;33DB ssabengali;09B7 ssadeva;0937 ssagujarati;0AB7 ssangcieuckorean;3149 ssanghieuhkorean;3185 
ssangieungkorean;3180 ssangkiyeokkorean;3132 ssangnieunkorean;3165 ssangpieupkorean;3143 ssangsioskorean;3146 ssangtikeutkorean;3138 ssuperior;F6F2 sterling;00A3 sterlingmonospace;FFE1 strokelongoverlaycmb;0336 strokeshortoverlaycmb;0335 subset;2282 subsetnotequal;228A subsetorequal;2286 succeeds;227B suchthat;220B suhiragana;3059 sukatakana;30B9 sukatakanahalfwidth;FF7D sukunarabic;0652 summation;2211 sun;263C superset;2283 supersetnotequal;228B supersetorequal;2287 svsquare;33DC syouwaerasquare;337C t;0074 tabengali;09A4 tackdown;22A4 tackleft;22A3 tadeva;0924 tagujarati;0AA4 tagurmukhi;0A24 taharabic;0637 tahfinalarabic;FEC2 tahinitialarabic;FEC3 tahiragana;305F tahmedialarabic;FEC4 taisyouerasquare;337D takatakana;30BF takatakanahalfwidth;FF80 tatweelarabic;0640 tau;03C4 tav;05EA tavdages;FB4A tavdagesh;FB4A tavdageshhebrew;FB4A tavhebrew;05EA tbar;0167 tbopomofo;310A tcaron;0165 tccurl;02A8 tcedilla;0163 tcheharabic;0686 tchehfinalarabic;FB7B tchehinitialarabic;FB7C tchehmedialarabic;FB7D tchehmeeminitialarabic;FB7C FEE4 tcircle;24E3 tcircumflexbelow;1E71 tcommaaccent;0163 tdieresis;1E97 tdotaccent;1E6B tdotbelow;1E6D tecyrillic;0442 tedescendercyrillic;04AD teharabic;062A tehfinalarabic;FE96 tehhahinitialarabic;FCA2 tehhahisolatedarabic;FC0C tehinitialarabic;FE97 tehiragana;3066 tehjeeminitialarabic;FCA1 tehjeemisolatedarabic;FC0B tehmarbutaarabic;0629 tehmarbutafinalarabic;FE94 tehmedialarabic;FE98 tehmeeminitialarabic;FCA4 tehmeemisolatedarabic;FC0E tehnoonfinalarabic;FC73 tekatakana;30C6 tekatakanahalfwidth;FF83 telephone;2121 telephoneblack;260E telishagedolahebrew;05A0 telishaqetanahebrew;05A9 tencircle;2469 tenideographicparen;3229 tenparen;247D tenperiod;2491 tenroman;2179 tesh;02A7 tet;05D8 tetdagesh;FB38 tetdageshhebrew;FB38 tethebrew;05D8 tetsecyrillic;04B5 tevirhebrew;059B tevirlefthebrew;059B thabengali;09A5 thadeva;0925 thagujarati;0AA5 thagurmukhi;0A25 thalarabic;0630 thalfinalarabic;FEAC thanthakhatlowleftthai;F898 thanthakhatlowrightthai;F897 
thanthakhatthai;0E4C thanthakhatupperleftthai;F896 theharabic;062B thehfinalarabic;FE9A thehinitialarabic;FE9B thehmedialarabic;FE9C thereexists;2203 therefore;2234 theta;03B8 theta1;03D1 thetasymbolgreek;03D1 thieuthacirclekorean;3279 thieuthaparenkorean;3219 thieuthcirclekorean;326B thieuthkorean;314C thieuthparenkorean;320B thirteencircle;246C thirteenparen;2480 thirteenperiod;2494 thonangmonthothai;0E11 thook;01AD thophuthaothai;0E12 thorn;00FE thothahanthai;0E17 thothanthai;0E10 thothongthai;0E18 thothungthai;0E16 thousandcyrillic;0482 thousandsseparatorarabic;066C thousandsseparatorpersian;066C three;0033 threearabic;0663 threebengali;09E9 threecircle;2462 threecircleinversesansserif;278C threedeva;0969 threeeighths;215C threegujarati;0AE9 threegurmukhi;0A69 threehackarabic;0663 threehangzhou;3023 threeideographicparen;3222 threeinferior;2083 threemonospace;FF13 threenumeratorbengali;09F6 threeoldstyle;F733 threeparen;2476 threeperiod;248A threepersian;06F3 threequarters;00BE threequartersemdash;F6DE threeroman;2172 threesuperior;00B3 threethai;0E53 thzsquare;3394 tihiragana;3061 tikatakana;30C1 tikatakanahalfwidth;FF81 tikeutacirclekorean;3270 tikeutaparenkorean;3210 tikeutcirclekorean;3262 tikeutkorean;3137 tikeutparenkorean;3202 tilde;02DC tildebelowcmb;0330 tildecmb;0303 tildecomb;0303 tildedoublecmb;0360 tildeoperator;223C tildeoverlaycmb;0334 tildeverticalcmb;033E timescircle;2297 tipehahebrew;0596 tipehalefthebrew;0596 tippigurmukhi;0A70 titlocyrilliccmb;0483 tiwnarmenian;057F tlinebelow;1E6F tmonospace;FF54 toarmenian;0569 tohiragana;3068 tokatakana;30C8 tokatakanahalfwidth;FF84 tonebarextrahighmod;02E5 tonebarextralowmod;02E9 tonebarhighmod;02E6 tonebarlowmod;02E8 tonebarmidmod;02E7 tonefive;01BD tonesix;0185 tonetwo;01A8 tonos;0384 tonsquare;3327 topatakthai;0E0F tortoiseshellbracketleft;3014 tortoiseshellbracketleftsmall;FE5D tortoiseshellbracketleftvertical;FE39 tortoiseshellbracketright;3015 tortoiseshellbracketrightsmall;FE5E 
tortoiseshellbracketrightvertical;FE3A totaothai;0E15 tpalatalhook;01AB tparen;24AF trademark;2122 trademarksans;F8EA trademarkserif;F6DB tretroflexhook;0288 triagdn;25BC triaglf;25C4 triagrt;25BA triagup;25B2 ts;02A6 tsadi;05E6 tsadidagesh;FB46 tsadidageshhebrew;FB46 tsadihebrew;05E6 tsecyrillic;0446 tsere;05B5 tsere12;05B5 tsere1e;05B5 tsere2b;05B5 tserehebrew;05B5 tserenarrowhebrew;05B5 tserequarterhebrew;05B5 tserewidehebrew;05B5 tshecyrillic;045B tsuperior;F6F3 ttabengali;099F ttadeva;091F ttagujarati;0A9F ttagurmukhi;0A1F tteharabic;0679 ttehfinalarabic;FB67 ttehinitialarabic;FB68 ttehmedialarabic;FB69 tthabengali;09A0 tthadeva;0920 tthagujarati;0AA0 tthagurmukhi;0A20 tturned;0287 tuhiragana;3064 tukatakana;30C4 tukatakanahalfwidth;FF82 tusmallhiragana;3063 tusmallkatakana;30C3 tusmallkatakanahalfwidth;FF6F twelvecircle;246B twelveparen;247F twelveperiod;2493 twelveroman;217B twentycircle;2473 twentyhangzhou;5344 twentyparen;2487 twentyperiod;249B two;0032 twoarabic;0662 twobengali;09E8 twocircle;2461 twocircleinversesansserif;278B twodeva;0968 twodotenleader;2025 twodotleader;2025 twodotleadervertical;FE30 twogujarati;0AE8 twogurmukhi;0A68 twohackarabic;0662 twohangzhou;3022 twoideographicparen;3221 twoinferior;2082 twomonospace;FF12 twonumeratorbengali;09F5 twooldstyle;F732 twoparen;2475 twoperiod;2489 twopersian;06F2 tworoman;2171 twostroke;01BB twosuperior;00B2 twothai;0E52 twothirds;2154 u;0075 uacute;00FA ubar;0289 ubengali;0989 ubopomofo;3128 ubreve;016D ucaron;01D4 ucircle;24E4 ucircumflex;00FB ucircumflexbelow;1E77 ucyrillic;0443 udattadeva;0951 udblacute;0171 udblgrave;0215 udeva;0909 udieresis;00FC udieresisacute;01D8 udieresisbelow;1E73 udieresiscaron;01DA udieresiscyrillic;04F1 udieresisgrave;01DC udieresismacron;01D6 udotbelow;1EE5 ugrave;00F9 ugujarati;0A89 ugurmukhi;0A09 uhiragana;3046 uhookabove;1EE7 uhorn;01B0 uhornacute;1EE9 uhorndotbelow;1EF1 uhorngrave;1EEB uhornhookabove;1EED uhorntilde;1EEF uhungarumlaut;0171 uhungarumlautcyrillic;04F3 
uinvertedbreve;0217 ukatakana;30A6 ukatakanahalfwidth;FF73 ukcyrillic;0479 ukorean;315C umacron;016B umacroncyrillic;04EF umacrondieresis;1E7B umatragurmukhi;0A41 umonospace;FF55 underscore;005F underscoredbl;2017 underscoremonospace;FF3F underscorevertical;FE33 underscorewavy;FE4F union;222A universal;2200 uogonek;0173 uparen;24B0 upblock;2580 upperdothebrew;05C4 upsilon;03C5 upsilondieresis;03CB upsilondieresistonos;03B0 upsilonlatin;028A upsilontonos;03CD uptackbelowcmb;031D uptackmod;02D4 uragurmukhi;0A73 uring;016F ushortcyrillic;045E usmallhiragana;3045 usmallkatakana;30A5 usmallkatakanahalfwidth;FF69 ustraightcyrillic;04AF ustraightstrokecyrillic;04B1 utilde;0169 utildeacute;1E79 utildebelow;1E75 uubengali;098A uudeva;090A uugujarati;0A8A uugurmukhi;0A0A uumatragurmukhi;0A42 uuvowelsignbengali;09C2 uuvowelsigndeva;0942 uuvowelsigngujarati;0AC2 uvowelsignbengali;09C1 uvowelsigndeva;0941 uvowelsigngujarati;0AC1 v;0076 vadeva;0935 vagujarati;0AB5 vagurmukhi;0A35 vakatakana;30F7 vav;05D5 vavdagesh;FB35 vavdagesh65;FB35 vavdageshhebrew;FB35 vavhebrew;05D5 vavholam;FB4B vavholamhebrew;FB4B vavvavhebrew;05F0 vavyodhebrew;05F1 vcircle;24E5 vdotbelow;1E7F vecyrillic;0432 veharabic;06A4 vehfinalarabic;FB6B vehinitialarabic;FB6C vehmedialarabic;FB6D vekatakana;30F9 venus;2640 verticalbar;007C verticallineabovecmb;030D verticallinebelowcmb;0329 verticallinelowmod;02CC verticallinemod;02C8 vewarmenian;057E vhook;028B vikatakana;30F8 viramabengali;09CD viramadeva;094D viramagujarati;0ACD visargabengali;0983 visargadeva;0903 visargagujarati;0A83 vmonospace;FF56 voarmenian;0578 voicediterationhiragana;309E voicediterationkatakana;30FE voicedmarkkana;309B voicedmarkkanahalfwidth;FF9E vokatakana;30FA vparen;24B1 vtilde;1E7D vturned;028C vuhiragana;3094 vukatakana;30F4 w;0077 wacute;1E83 waekorean;3159 wahiragana;308F wakatakana;30EF wakatakanahalfwidth;FF9C wakorean;3158 wasmallhiragana;308E wasmallkatakana;30EE wattosquare;3357 wavedash;301C wavyunderscorevertical;FE34 
wawarabic;0648 wawfinalarabic;FEEE wawhamzaabovearabic;0624 wawhamzaabovefinalarabic;FE86 wbsquare;33DD wcircle;24E6 wcircumflex;0175 wdieresis;1E85 wdotaccent;1E87 wdotbelow;1E89 wehiragana;3091 weierstrass;2118 wekatakana;30F1 wekorean;315E weokorean;315D wgrave;1E81 whitebullet;25E6 whitecircle;25CB whitecircleinverse;25D9 whitecornerbracketleft;300E whitecornerbracketleftvertical;FE43 whitecornerbracketright;300F whitecornerbracketrightvertical;FE44 whitediamond;25C7 whitediamondcontainingblacksmalldiamond;25C8 whitedownpointingsmalltriangle;25BF whitedownpointingtriangle;25BD whiteleftpointingsmalltriangle;25C3 whiteleftpointingtriangle;25C1 whitelenticularbracketleft;3016 whitelenticularbracketright;3017 whiterightpointingsmalltriangle;25B9 whiterightpointingtriangle;25B7 whitesmallsquare;25AB whitesmilingface;263A whitesquare;25A1 whitestar;2606 whitetelephone;260F whitetortoiseshellbracketleft;3018 whitetortoiseshellbracketright;3019 whiteuppointingsmalltriangle;25B5 whiteuppointingtriangle;25B3 wihiragana;3090 wikatakana;30F0 wikorean;315F wmonospace;FF57 wohiragana;3092 wokatakana;30F2 wokatakanahalfwidth;FF66 won;20A9 wonmonospace;FFE6 wowaenthai;0E27 wparen;24B2 wring;1E98 wsuperior;02B7 wturned;028D wynn;01BF x;0078 xabovecmb;033D xbopomofo;3112 xcircle;24E7 xdieresis;1E8D xdotaccent;1E8B xeharmenian;056D xi;03BE xmonospace;FF58 xparen;24B3 xsuperior;02E3 y;0079 yaadosquare;334E yabengali;09AF yacute;00FD yadeva;092F yaekorean;3152 yagujarati;0AAF yagurmukhi;0A2F yahiragana;3084 yakatakana;30E4 yakatakanahalfwidth;FF94 yakorean;3151 yamakkanthai;0E4E yasmallhiragana;3083 yasmallkatakana;30E3 yasmallkatakanahalfwidth;FF6C yatcyrillic;0463 ycircle;24E8 ycircumflex;0177 ydieresis;00FF ydotaccent;1E8F ydotbelow;1EF5 yeharabic;064A yehbarreearabic;06D2 yehbarreefinalarabic;FBAF yehfinalarabic;FEF2 yehhamzaabovearabic;0626 yehhamzaabovefinalarabic;FE8A yehhamzaaboveinitialarabic;FE8B yehhamzaabovemedialarabic;FE8C yehinitialarabic;FEF3 yehmedialarabic;FEF4 
yehmeeminitialarabic;FCDD yehmeemisolatedarabic;FC58 yehnoonfinalarabic;FC94 yehthreedotsbelowarabic;06D1 yekorean;3156 yen;00A5 yenmonospace;FFE5 yeokorean;3155 yeorinhieuhkorean;3186 yerahbenyomohebrew;05AA yerahbenyomolefthebrew;05AA yericyrillic;044B yerudieresiscyrillic;04F9 yesieungkorean;3181 yesieungpansioskorean;3183 yesieungsioskorean;3182 yetivhebrew;059A ygrave;1EF3 yhook;01B4 yhookabove;1EF7 yiarmenian;0575 yicyrillic;0457 yikorean;3162 yinyang;262F yiwnarmenian;0582 ymonospace;FF59 yod;05D9 yoddagesh;FB39 yoddageshhebrew;FB39 yodhebrew;05D9 yodyodhebrew;05F2 yodyodpatahhebrew;FB1F yohiragana;3088 yoikorean;3189 yokatakana;30E8 yokatakanahalfwidth;FF96 yokorean;315B yosmallhiragana;3087 yosmallkatakana;30E7 yosmallkatakanahalfwidth;FF6E yotgreek;03F3 yoyaekorean;3188 yoyakorean;3187 yoyakthai;0E22 yoyingthai;0E0D yparen;24B4 ypogegrammeni;037A ypogegrammenigreekcmb;0345 yr;01A6 yring;1E99 ysuperior;02B8 ytilde;1EF9 yturned;028E yuhiragana;3086 yuikorean;318C yukatakana;30E6 yukatakanahalfwidth;FF95 yukorean;3160 yusbigcyrillic;046B yusbigiotifiedcyrillic;046D yuslittlecyrillic;0467 yuslittleiotifiedcyrillic;0469 yusmallhiragana;3085 yusmallkatakana;30E5 yusmallkatakanahalfwidth;FF6D yuyekorean;318B yuyeokorean;318A yyabengali;09DF yyadeva;095F z;007A zaarmenian;0566 zacute;017A zadeva;095B zagurmukhi;0A5B zaharabic;0638 zahfinalarabic;FEC6 zahinitialarabic;FEC7 zahiragana;3056 zahmedialarabic;FEC8 zainarabic;0632 zainfinalarabic;FEB0 zakatakana;30B6 zaqefgadolhebrew;0595 zaqefqatanhebrew;0594 zarqahebrew;0598 zayin;05D6 zayindagesh;FB36 zayindageshhebrew;FB36 zayinhebrew;05D6 zbopomofo;3117 zcaron;017E zcircle;24E9 zcircumflex;1E91 zcurl;0291 zdot;017C zdotaccent;017C zdotbelow;1E93 zecyrillic;0437 zedescendercyrillic;0499 zedieresiscyrillic;04DF zehiragana;305C zekatakana;30BC zero;0030 zeroarabic;0660 zerobengali;09E6 zerodeva;0966 zerogujarati;0AE6 zerogurmukhi;0A66 zerohackarabic;0660 zeroinferior;2080 zeromonospace;FF10 zerooldstyle;F730 
zeropersian;06F0 zerosuperior;2070 zerothai;0E50 zerowidthjoiner;FEFF zerowidthnonjoiner;200C zerowidthspace;200B zeta;03B6 zhbopomofo;3113 zhearmenian;056A zhebrevecyrillic;04C2 zhecyrillic;0436 zhedescendercyrillic;0497 zhedieresiscyrillic;04DD zihiragana;3058 zikatakana;30B8 zinorhebrew;05AE zlinebelow;1E95 zmonospace;FF5A zohiragana;305E zokatakana;30BE zparen;24B5 zretroflexhook;0290 zstroke;01B6 zuhiragana;305A zukatakana;30BA a100;275E a101;2761 a102;2762 a103;2763 a104;2764 a105;2710 a106;2765 a107;2766 a108;2767 a109;2660 a10;2721 a110;2665 a111;2666 a112;2663 a117;2709 a118;2708 a119;2707 a11;261B a120;2460 a121;2461 a122;2462 a123;2463 a124;2464 a125;2465 a126;2466 a127;2467 a128;2468 a129;2469 a12;261E a130;2776 a131;2777 a132;2778 a133;2779 a134;277A a135;277B a136;277C a137;277D a138;277E a139;277F a13;270C a140;2780 a141;2781 a142;2782 a143;2783 a144;2784 a145;2785 a146;2786 a147;2787 a148;2788 a149;2789 a14;270D a150;278A a151;278B a152;278C a153;278D a154;278E a155;278F a156;2790 a157;2791 a158;2792 a159;2793 a15;270E a160;2794 a161;2192 a162;27A3 a163;2194 a164;2195 a165;2799 a166;279B a167;279C a168;279D a169;279E a16;270F a170;279F a171;27A0 a172;27A1 a173;27A2 a174;27A4 a175;27A5 a176;27A6 a177;27A7 a178;27A8 a179;27A9 a17;2711 a180;27AB a181;27AD a182;27AF a183;27B2 a184;27B3 a185;27B5 a186;27B8 a187;27BA a188;27BB a189;27BC a18;2712 a190;27BD a191;27BE a192;279A a193;27AA a194;27B6 a195;27B9 a196;2798 a197;27B4 a198;27B7 a199;27AC a19;2713 a1;2701 a200;27AE a201;27B1 a202;2703 a203;2750 a204;2752 a205;276E a206;2770 a20;2714 a21;2715 a22;2716 a23;2717 a24;2718 a25;2719 a26;271A a27;271B a28;271C a29;2722 a2;2702 a30;2723 a31;2724 a32;2725 a33;2726 a34;2727 a35;2605 a36;2729 a37;272A a38;272B a39;272C a3;2704 a40;272D a41;272E a42;272F a43;2730 a44;2731 a45;2732 a46;2733 a47;2734 a48;2735 a49;2736 a4;260E a50;2737 a51;2738 a52;2739 a53;273A a54;273B a55;273C a56;273D a57;273E a58;273F a59;2740 a5;2706 a60;2741 a61;2742 a62;2743 a63;2744 
a64;2745 a65;2746 a66;2747 a67;2748 a68;2749 a69;274A a6;271D a70;274B a71;25CF a72;274D a73;25A0 a74;274F a75;2751 a76;25B2 a77;25BC a78;25C6 a79;2756 a7;271E a81;25D7 a82;2758 a83;2759 a84;275A a85;276F a86;2771 a87;2772 a88;2773 a89;2768 a8;271F a90;2769 a91;276C a92;276D a93;276A a94;276B a95;2774 a96;2775 a97;275B a98;275C a99;275D a9;2720 """ # string table management # class StringTable: def __init__( self, name_list, master_table_name ): self.names = name_list self.master_table = master_table_name self.indices = {} index = 0 for name in name_list: self.indices[name] = index index += len( name ) + 1 self.total = index def dump( self, file ): write = file.write write( " static const char " + self.master_table + "[" + repr( self.total ) + "] =\n" ) write( " {\n" ) line = "" for name in self.names: line += " '" line += string.join( ( re.findall( ".", name ) ), "','" ) line += "', 0,\n" write( line + " };\n\n\n" ) def dump_sublist( self, file, table_name, macro_name, sublist ): write = file.write write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" ) write( " /* Values are offsets into the `" + self.master_table + "' table */\n\n" ) write( " static const short " + table_name + "[" + macro_name + "] =\n" ) write( " {\n" ) line = " " comma = "" col = 0 for name in sublist: line += comma line += "%4d" % self.indices[name] col += 1 comma = "," if col == 14: col = 0 comma = ",\n " write( line + "\n };\n\n\n" ) # We now store the Adobe Glyph List in compressed form. The list is put # into a data structure called `trie' (because it has a tree-like # appearance). Consider, for example, that you want to store the # following name mapping: # # A => 1 # Aacute => 6 # Abalon => 2 # Abstract => 4 # # It is possible to store the entries as follows. 
#
#   A => 1
#   |
#   +-acute => 6
#   |
#   +-b
#     |
#     +-alon => 2
#     |
#     +-stract => 4
#
# We see that each node in the trie has:
#
# - one or more `letters'
# - an optional value
# - zero or more child nodes
#
# The first step is to call
#
#   root = StringNode( "", 0 )
#   for word in map.values():
#       root.add( word, map[word] )
#
# which creates a large trie where each node has only one child.
#
# Executing
#
#   root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letter, according to the following scheme.  We
#   use the fact that in the AGL no name contains character codes > 127.
#
#     name         bitsize     description
#     ----------------------------------------------------------------
#     notlast      1           Set to 1 if this is not the last letter
#                              in the word.
#     ascii        7           The letter's ASCII value.
#
# - The letter is followed by a children count and the value of the
#   current key (if any).  Again we can do some optimization because all
#   AGL entries are from the BMP; this means that 16 bits are sufficient
#   to store its Unicode values.  Additionally, no node has more than
#   127 children.
#
#     name         bitsize     description
#     -----------------------------------------
#     hasvalue     1           Set to 1 if a 16-bit Unicode value follows.
#     num_children 7           Number of children.  Can be 0 only if
#                              `hasvalue' is set to 1.
#     value        16          Optional Unicode value.
#
# - A node is finished by a list of 16-bit absolute offsets to the
#   children, which must be sorted in increasing order of their first
#   letter.
#
# For simplicity, all 16-bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
# class StringNode: def __init__( self, letter, value ): self.letter = letter self.value = value self.children = {} def __cmp__( self, other ): return ord( self.letter[0] ) - ord( other.letter[0] ) def add( self, word, value ): if len( word ) == 0: self.value = value return letter = word[0] word = word[1:] if self.children.has_key( letter ): child = self.children[letter] else: child = StringNode( letter, 0 ) self.children[letter] = child child.add( word, value ) def optimize( self ): # optimize all children first children = self.children.values() self.children = {} for child in children: self.children[child.letter[0]] = child.optimize() # don't optimize if there's a value, # if we don't have any child or if we # have more than one child if ( self.value != 0 ) or ( not children ) or len( children ) > 1: return self child = children[0] self.letter += child.letter self.value = child.value self.children = child.children return self def dump_debug( self, write, margin ): # this is used during debugging line = margin + "+-" if len( self.letter ) == 0: line += "<NOLETTER>" else: line += self.letter if self.value: line += " => " + repr( self.value ) write( line + "\n" ) if self.children: margin += "| " for child in self.children.values(): child.dump_debug( write, margin ) def locate( self, index ): self.index = index if len( self.letter ) > 0: index += len( self.letter ) + 1 else: index += 2 if self.value != 0: index += 2 children = self.children.values() children.sort() index += 2 * len( children ) for child in children: index = child.locate( index ) return index def store( self, storage ): # write the letters l = len( self.letter ) if l == 0: storage += struct.pack( "B", 0 ) else: for n in range( l ): val = ord( self.letter[n] ) if n < l - 1: val += 128 storage += struct.pack( "B", val ) # write the count children = self.children.values() children.sort() count = len( children ) if self.value != 0: storage += struct.pack( "!BH", count + 128, self.value ) else: storage += 
struct.pack( "B", count ) for child in children: storage += struct.pack( "!H", child.index ) for child in children: storage = child.store( storage ) return storage def adobe_glyph_values(): """return the list of glyph names and their unicode values""" lines = string.split( adobe_glyph_list, '\n' ) glyphs = [] values = [] for line in lines: if line: fields = string.split( line, ';' ) # print fields[1] + ' - ' + fields[0] subfields = string.split( fields[1], ' ' ) if len( subfields ) == 1: glyphs.append( fields[0] ) values.append( fields[1] ) return glyphs, values def filter_glyph_names( alist, filter ): """filter `alist' by taking _out_ all glyph names that are in `filter'""" count = 0 extras = [] for name in alist: try: filtered_index = filter.index( name ) except: extras.append( name ) return extras def dump_encoding( file, encoding_name, encoding_list ): """dump a given encoding""" write = file.write write( " /* the following are indices into the SID name table */\n" ) write( " static const unsigned short " + encoding_name + "[" + repr( len( encoding_list ) ) + "] =\n" ) write( " {\n" ) line = " " comma = "" col = 0 for value in encoding_list: line += comma line += "%3d" % value comma = "," col += 1 if col == 16: col = 0 comma = ",\n " write( line + "\n };\n\n\n" ) def dump_array( the_array, write, array_name ): """dumps a given encoding""" write( " static const unsigned char " + array_name + "[" + repr( len( the_array ) ) + "L] =\n" ) write( " {\n" ) line = "" comma = " " col = 0 for value in the_array: line += comma line += "%3d" % ord( value ) comma = "," col += 1 if col == 16: col = 0 comma = ",\n " if len( line ) > 1024: write( line ) line = "" write( line + "\n };\n\n\n" ) def main(): """main program body""" if len( sys.argv ) != 2: print __doc__ % sys.argv[0] sys.exit( 1 ) file = open( sys.argv[1], "w\n" ) write = file.write count_sid = len( sid_standard_names ) # `mac_extras' contains the list of glyph names in the Macintosh standard # encoding which are 
not in the SID Standard Names. # mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names ) # `base_list' contains the names of our final glyph names table. # It consists of the `mac_extras' glyph names, followed by the SID # standard names. # mac_extras_count = len( mac_extras ) base_list = mac_extras + sid_standard_names write( "/***************************************************************************/\n" ) write( "/* */\n" ) write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) ) write( "/* */\n" ) write( "/* PostScript glyph names. */\n" ) write( "/* */\n" ) write( "/* Copyright 2005, 2008, 2011 by */\n" ) write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" ) write( "/* */\n" ) write( "/* This file is part of the FreeType project, and may only be used, */\n" ) write( "/* modified, and distributed under the terms of the FreeType project */\n" ) write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" ) write( "/* this file you indicate that you have read the license and */\n" ) write( "/* understand and accept it fully. */\n" ) write( "/* */\n" ) write( "/***************************************************************************/\n" ) write( "\n" ) write( "\n" ) write( " /* This file has been generated automatically -- do not edit! 
*/\n" ) write( "\n" ) write( "\n" ) # dump final glyph list (mac extras + sid standard names) # st = StringTable( base_list, "ft_standard_glyph_names" ) st.dump( file ) st.dump_sublist( file, "ft_mac_names", "FT_NUM_MAC_NAMES", mac_standard_names ) st.dump_sublist( file, "ft_sid_names", "FT_NUM_SID_NAMES", sid_standard_names ) dump_encoding( file, "t1_standard_encoding", t1_standard_encoding ) dump_encoding( file, "t1_expert_encoding", t1_expert_encoding ) # dump the AGL in its compressed form # agl_glyphs, agl_values = adobe_glyph_values() dict = StringNode( "", 0 ) for g in range( len( agl_glyphs ) ): dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) ) dict = dict.optimize() dict_len = dict.locate( 0 ) dict_array = dict.store( "" ) write( """\ /* * This table is a compressed version of the Adobe Glyph List (AGL), * optimized for efficient searching. It has been generated by the * `glnames.py' python script located in the `src/tools' directory. * * The lookup function to get the Unicode value for a given string * is defined below the table. */ #ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST """ ) dump_array( dict_array, write, "ft_adobe_glyph_list" ) # write the lookup routine now # write( """\ /* * This function searches the compressed table efficiently. 
*/ static unsigned long ft_get_adobe_glyph_index( const char* name, const char* limit ) { int c = 0; int count, min, max; const unsigned char* p = ft_adobe_glyph_list; if ( name == 0 || name >= limit ) goto NotFound; c = *name++; count = p[1]; p += 2; min = 0; max = count; while ( min < max ) { int mid = ( min + max ) >> 1; const unsigned char* q = p + mid * 2; int c2; q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] ); c2 = q[0] & 127; if ( c2 == c ) { p = q; goto Found; } if ( c2 < c ) min = mid + 1; else max = mid; } goto NotFound; Found: for (;;) { /* assert (*p & 127) == c */ if ( name >= limit ) { if ( (p[0] & 128) == 0 && (p[1] & 128) != 0 ) return (unsigned long)( ( (int)p[2] << 8 ) | p[3] ); goto NotFound; } c = *name++; if ( p[0] & 128 ) { p++; if ( c != (p[0] & 127) ) goto NotFound; continue; } p++; count = p[0] & 127; if ( p[0] & 128 ) p += 2; p++; for ( ; count > 0; count--, p += 2 ) { int offset = ( (int)p[0] << 8 ) | p[1]; const unsigned char* q = ft_adobe_glyph_list + offset; if ( c == ( q[0] & 127 ) ) { p = q; goto NextIter; } } goto NotFound; NextIter: ; } NotFound: return 0; } #endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */ """ ) if 0: # generate unit test, or don't # # now write the unit test to check that everything works OK # write( "#ifdef TEST\n\n" ) write( "static const char* const the_names[] = {\n" ) for name in agl_glyphs: write( ' "' + name + '",\n' ) write( " 0\n};\n" ) write( "static const unsigned long the_values[] = {\n" ) for val in agl_values: write( ' 0x' + val + ',\n' ) write( " 0\n};\n" ) write( """ #include <stdlib.h> #include <stdio.h> int main( void ) { int result = 0; const char* const* names = the_names; const unsigned long* values = the_values; for ( ; *names; names++, values++ ) { const char* name = *names; unsigned long reference = *values; unsigned long value; value = ft_get_adobe_glyph_index( name, name + strlen( name ) ); if ( value != reference ) { result = 1; fprintf( stderr, "name '%s' => %04x instead of 
%04x\\n", name, value, reference ); } } return result; } """ ) write( "#endif /* TEST */\n" ) write("\n/* END */\n") # Now run the main routine # main() # END
miky-kr5/Super-HUGS-Revolution-98
refs/heads/master
particle.py
2
############################################ # Created on 1-10-2013. Miguel Angel Astor # ############################################ import math import random import pygame import math_utils import imloader class Particle(pygame.sprite.Sprite): def __init__(self, lifespan, scale, texture, gravity = [0.0, 9.8], position = [0,0], initial_vel = [100.0, 100.0], friction = 1.0, frame_rate = 60.0): pygame.sprite.Sprite.__init__(self) self.age = 0 self.lifespan = lifespan self.gravity = [gravity[0] / frame_rate, gravity[1] / frame_rate] self.position = position self.velocity = [float(initial_vel[0]) / frame_rate, float(initial_vel[1]) / frame_rate] # Pixels per frame. self.friction = friction self.size = (int(float(texture.get_width()) * scale), int(float(texture.get_height()) * scale)) self.alive = True self.frame_rate = frame_rate self.then = pygame.time.get_ticks() self.image = pygame.transform.smoothscale(texture, self.size) self.rect = self.image.get_rect() self.rect.center = (self.position[0], self.position[1]) self.screen_w = pygame.display.Info().current_w self.screen_h = pygame.display.Info().current_h def is_alive(self): return self.alive def kill(self): self.alive = False def set_gravity(self, gravity): self.gravity = list(gravity) def update(self): if self.alive: if self.age >= self.lifespan: self.alive = False return None now = pygame.time.get_ticks() delta_t = now - self.then self.position[0] += (self.velocity[0] * delta_t) * (self.frame_rate / 1000.0) self.position[1] += (self.velocity[1] * delta_t) * (self.frame_rate / 1000.0) self.velocity[0] *= self.friction self.velocity[1] *= self.friction self.position[0] += (self.gravity[0] * delta_t) * (self.frame_rate / 1000.0) self.position[1] += (self.gravity[1] * delta_t) * (self.frame_rate / 1000.0) self.rect.center = (self.position[0], self.position[1]) self.age += 1 self.then = now if self.rect.center[0] < 0 - self.rect.width or self.rect.center[0] > self.screen_w + self.rect.width: self.kill() return None 
if self.rect.center[1] < 0 - self.rect.height or self.rect.center[1] > self.screen_h + self.rect.height: self.kill() return None def draw(self, canvas): if self.alive: canvas.blit(self.image, self.rect) class ParticleSystem: def __init__(self, id, name, texture_filename, lifespan = 1000, max_particles = 1000, parts_per_second = 25, angle = 0): self.id = id self.name = name self.lifespan = lifespan self.max_particles = max_particles self.ppms = float(parts_per_second) / 1000.0 # Particles per milisecond. self.mspp = float(1000.0 / parts_per_second) # Miliseconds per particle. self.angle = angle self.working = False self.particles = set() self.texture = imloader.cached_image_loader.get_image_to_screen_percent(texture_filename) self.part_creation_accum = 0.0 self.then = pygame.time.get_ticks() self.gravity = [0.0, 9.8] self.position = [pygame.display.Info().current_w / 2, pygame.display.Info().current_h / 2] self.initial_velocity_max = 50 # Pixels per second. self.friction = 0.99 self.frame_rate = 60.0 self.rectangle = pygame.Rect(0, 0, 25, 25) self.rectangle.center = (self.position[0], self.position[1]) random.seed(None) def is_working(self): return self.working def start(self): self.working = True def stop(self): self.working = False def set_position(self, position): self.position = list(position) def set_gravity(self, gravity): self.gravity = list(gravity) def set_max_velocity(self, max_vel): self.initial_velocity_max = max_vel def set_angle(self, angle): self.angle = angle def set_friction(self, friction): self.friction = friction def update(self): # Calculate the time delta. now = pygame.time.get_ticks() delta_t = now - self.then # Eliminate dead particles. remove_set = set() for particle in self.particles: if not particle.is_alive(): remove_set.add(particle) self.particles.difference_update(remove_set) if self.working: # Create new particles if possible. 
if len(self.particles) < self.max_particles: max_parts = self.max_particles - len(self.particles) self.part_creation_accum += (self.ppms * delta_t) parts_needed = int(self.part_creation_accum // 1) if parts_needed >= 1: for i in range(parts_needed): velocity = [float(random.randrange(-self.initial_velocity_max, self.initial_velocity_max)), float(random.randrange(-self.initial_velocity_max, self.initial_velocity_max))] particle = Particle( int(self.lifespan), max(min(random.random(), 1.0), 0.2), self.texture, list(self.gravity), list(self.position), velocity, self.friction, int(self.frame_rate)) self.particles.add(particle) self.part_creation_accum = 0.0 # Update every particle. for particle in self.particles: particle.update() # Restart the time counter. self.then = now def draw(self, canvas): for particle in self.particles: particle.draw(canvas)
alexthered/kienhoc-platform
refs/heads/master
lms/djangoapps/foldit/migrations/0001_initial.py
114
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Score' db.create_table('foldit_score', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='foldit_scores', to=orm['auth.User'])), ('unique_user_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)), ('puzzle_id', self.gf('django.db.models.fields.IntegerField')()), ('best_score', self.gf('django.db.models.fields.FloatField')(db_index=True)), ('current_score', self.gf('django.db.models.fields.FloatField')(db_index=True)), ('score_version', self.gf('django.db.models.fields.IntegerField')()), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), )) db.send_create_signal('foldit', ['Score']) # Adding model 'PuzzleComplete' db.create_table('foldit_puzzlecomplete', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='foldit_puzzles_complete', to=orm['auth.User'])), ('unique_user_id', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)), ('puzzle_id', self.gf('django.db.models.fields.IntegerField')()), ('puzzle_set', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('puzzle_subset', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), )) db.send_create_signal('foldit', ['PuzzleComplete']) # Adding unique constraint on 'PuzzleComplete', fields ['user', 'puzzle_id', 'puzzle_set', 'puzzle_subset'] db.create_unique('foldit_puzzlecomplete', ['user_id', 'puzzle_id', 'puzzle_set', 'puzzle_subset']) def backwards(self, orm): # Removing unique constraint on 'PuzzleComplete', 
fields ['user', 'puzzle_id', 'puzzle_set', 'puzzle_subset'] db.delete_unique('foldit_puzzlecomplete', ['user_id', 'puzzle_id', 'puzzle_set', 'puzzle_subset']) # Deleting model 'Score' db.delete_table('foldit_score') # Deleting model 'PuzzleComplete' db.delete_table('foldit_puzzlecomplete') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'foldit.puzzlecomplete': { 'Meta': {'ordering': "['puzzle_id']", 'unique_together': "(('user', 'puzzle_id', 'puzzle_set', 'puzzle_subset'),)", 'object_name': 'PuzzleComplete'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'puzzle_id': ('django.db.models.fields.IntegerField', [], {}), 'puzzle_set': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'puzzle_subset': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'unique_user_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'foldit_puzzles_complete'", 'to': "orm['auth.User']"}) }, 'foldit.score': { 'Meta': {'object_name': 'Score'}, 'best_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}), 'created': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_score': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'puzzle_id': ('django.db.models.fields.IntegerField', [], {}), 'score_version': ('django.db.models.fields.IntegerField', [], {}), 'unique_user_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'foldit_scores'", 'to': "orm['auth.User']"}) } } complete_apps = ['foldit']
jeffmahoney/supybot
refs/heads/urlsnarfer
test/test_dynamicScope.py
15
### # Copyright (c) 2005, Jeremiah Fincher # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
### from supybot.test import * class TestDynamic(SupyTestCase): def test(self): def f(x): i = 2 return g(x) def g(y): j = 3 return h(y) def h(z): self.assertEqual(dynamic.z, z) self.assertEqual(dynamic.j, 3) self.assertEqual(dynamic.i, 2) self.assertEqual(dynamic.y, z) self.assertEqual(dynamic.x, z) #self.assertRaises(NameError, getattr, dynamic, 'asdfqwerqewr') self.assertEqual(dynamic.self, self) return z self.assertEqual(f(10), 10) def testCommonUsage(self): foo = 'bar' def f(): foo = dynamic.foo self.assertEqual(foo, 'bar') f() # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
petrvanblokland/Xierpa3
refs/heads/master
xierpa3/__init__.py
14
# -*- coding: UTF-8 -*- # ----------------------------------------------------------------------------- # xierpa server # Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com # # X I E R P A 3 # Distribution by the MIT License. # # -----------------------------------------------------------------------------
clovis/PhiloLogic4
refs/heads/master
www/scripts/get_text_object.py
2
#!/usr/bin/env python3
"""CGI endpoint that returns a PhiloLogic text object as JSON."""

import json
import os
from wsgiref.handlers import CGIHandler

from philologic.runtime.DB import DB
from philologic.runtime.HitWrapper import ObjectWrapper
from philologic.runtime import generate_text_object

import sys

sys.path.append("..")
import custom_functions

try:
    from custom_functions import WebConfig
except ImportError:
    from philologic.runtime import WebConfig
try:
    from custom_functions import WSGIHandler
except ImportError:
    from philologic.runtime import WSGIHandler


def get_text_object(environ, start_response):
    """WSGI application: look up the text object named by the request's
    `philo_id` and yield it as a UTF-8 encoded JSON document.

    Parameters
    ----------
    environ : dict
        Standard WSGI environment for the incoming request.
    start_response : callable
        Standard WSGI response starter; called with 200 + JSON/CORS headers.
    """
    status = "200 OK"
    headers = [("Content-type", "application/json; charset=UTF-8"),
               ("Access-Control-Allow-Origin", "*")]
    start_response(status, headers)
    config = WebConfig(os.path.abspath(os.path.dirname(__file__)).replace("scripts", ""))
    db = DB(config.db_path + "/data/")
    request = WSGIHandler(environ, config)
    # philo_id is a space-separated string of object-level ids that must be
    # padded to exactly 7 components with " 0".
    # BUG FIX: the original computed `7 - len(request.philo_id)`, counting
    # *characters* of the string rather than components, so ids were padded
    # incorrectly (or not at all) in almost every case.
    zeros = 7 - len(request.philo_id.split())
    if zeros > 0:
        request.philo_id += zeros * " 0"
    # NOTE(review): `obj` is never used below; kept because ObjectWrapper
    # construction may validate the id against the DB — confirm before removing.
    obj = ObjectWrapper(request["philo_id"].split(), db)
    text_object = generate_text_object(request, config)
    yield json.dumps(text_object).encode("utf8")


if __name__ == "__main__":
    CGIHandler().run(get_text_object)
tai271828/courses
refs/heads/master
cs/udacity/cs101-intro-cs/code/lesson5/problem-set/refactoring_hashtable.py
4
# 6. In video 28. Update, it was suggested that some of the duplicate code in
# lookup and update could be avoided by a better design. We can do this by
# defining a procedure, get_entry, that finds the entry corresponding to a
# given key, and using it in both lookup and update.  Neither hashtable_update
# nor hashtable_lookup contains a loop any more.


def get_entry(bucket, key):
    """Return the [key, value] entry for `key` in `bucket`, or None if the
    key is not present."""
    for entry in bucket:
        if entry[0] == key:
            return entry
    return None


def hashtable_update(htable, key, value):
    """Associate `value` with `key`, overwriting any existing entry.

    BUG FIX: the original appended a fresh [key, value] pair unconditionally,
    so updating an existing key left a stale duplicate entry in the bucket.
    Now a new pair is appended only when the key is absent.
    """
    bucket = hashtable_get_bucket(htable, key)
    entry = get_entry(bucket, key)
    if entry is not None:
        entry[1] = value
    else:
        bucket.append([key, value])


def hashtable_lookup(htable, key):
    """Return the value stored under `key`, or None if the key is absent.

    BUG FIX: the original indexed get_entry()'s result unconditionally and
    crashed with a TypeError (None[1]) when the key was missing.
    """
    entry = get_entry(hashtable_get_bucket(htable, key), key)
    if entry is not None:
        return entry[1]
    return None


def make_hashtable(size):
    """Return an empty hash table with `size` independent buckets."""
    return [[] for _ in range(size)]


def hash_string(s, size):
    """Hash string `s` into a bucket index in range(size).

    Simple (collision-prone) sum of character codes, as in the course.
    """
    h = 0
    for c in s:
        h = h + ord(c)
    return h % size


def hashtable_get_bucket(htable, key):
    """Return the bucket of `htable` that `key` hashes into."""
    return htable[hash_string(key, len(htable))]
# For example:
# (FIX: the "For example," lead-in had lost its comment marker, making the
# file a syntax error; the print statement is now a print() call so the
# example runs on both Python 2 and Python 3.)
table = make_hashtable(10)
hashtable_update(table, 'Python', 'Monty')
hashtable_update(table, 'CLU', 'Barbara Liskov')
hashtable_update(table, 'JavaScript', 'Brendan Eich')
hashtable_update(table, 'Python', 'Guido van Rossum')
print(hashtable_lookup(table, 'Python'))
#>>> Guido van Rossum
joequery/django
refs/heads/master
tests/field_subclassing/models.py
252
""" Tests for field subclassing. """ from django.db import models from django.utils.encoding import force_text, python_2_unicode_compatible from .fields import JSONField, Small, SmallerField, SmallField @python_2_unicode_compatible class MyModel(models.Model): name = models.CharField(max_length=10) data = SmallField('small field') def __str__(self): return force_text(self.name) class OtherModel(models.Model): data = SmallerField() class ChoicesModel(models.Model): SMALL_AB = Small('a', 'b') SMALL_CD = Small('c', 'd') SMALL_CHOICES = ( (SMALL_AB, str(SMALL_AB)), (SMALL_CD, str(SMALL_CD)), ) data = SmallField('small field', choices=SMALL_CHOICES) class DataModel(models.Model): data = JSONField()
freedomtan/tensorflow
refs/heads/master
tensorflow/python/data/experimental/benchmarks/matching_files_benchmark.py
16
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for the experimental `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import shutil
import tempfile
import time

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class MatchingFilesBenchmark(test.Benchmark):
  """Benchmark for the experimental `MatchingFilesDataset`."""

  def benchmark_nested_directories(self):
    """Times per-element retrieval of `**`-glob matches over a wide, deep
    directory tree (width * depth directories, two files each)."""
    tmp_dir = tempfile.mkdtemp()
    width = 500
    depth = 10
    # Build the fixture tree: for each of `width` top-level dirs, nested
    # chains 0/, 0/1/, ... up to `depth` levels.  Only the deepest level
    # gets the .txt/.log files that the patterns below will match; the
    # shallower levels get non-matching .py/.pyc decoys.
    for i in range(width):
      for j in range(depth):
        new_base = os.path.join(tmp_dir, str(i),
                                *[str(dir_name) for dir_name in range(j)])
        os.makedirs(new_base)
        child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
        for f in child_files:
          filename = os.path.join(new_base, f)
          # Touch the file; contents are irrelevant to glob matching.
          open(filename, 'w').close()

    # One recursive `**` component per tree level, one pattern per suffix.
    patterns = [
        os.path.join(tmp_dir, os.path.join(*['**' for _ in range(depth)]),
                     suffix) for suffix in ['*.txt', '*.log']
    ]

    deltas = []
    iters = 3
    for _ in range(iters):
      with ops.Graph().as_default():
        dataset = matching_files.MatchingFilesDataset(patterns)
        # Disable default graph optimizations so the benchmark measures the
        # raw dataset op rather than any rewritten pipeline.
        options = dataset_ops.Options()
        options.experimental_optimization.apply_default_optimizations = False
        dataset = dataset.with_options(options)
        next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()

        with session.Session() as sess:
          # Time each individual filename fetch until the dataset is
          # exhausted; the first fetch includes directory-walk startup cost.
          sub_deltas = []
          while True:
            try:
              start = time.time()
              sess.run(next_element)
              end = time.time()
              sub_deltas.append(end - start)
            except errors.OutOfRangeError:
              break
          deltas.append(sub_deltas)

    # Median across the `iters` runs, element-wise, to damp outliers.
    median_deltas = np.median(deltas, axis=0)

    self.report_benchmark(
        iters=iters,
        wall_time=np.sum(median_deltas),
        extras={
            'read first file:': median_deltas[0],
            'read second file:': median_deltas[1],
            'avg time for reading %d more filenames:' %
            (len(median_deltas) - 2): np.average(median_deltas[2:])
        },
        name='nested_directory(%d*%d)' % (width, depth))

    shutil.rmtree(tmp_dir, ignore_errors=True)


if __name__ == '__main__':
  test.main()
cwisecarver/osf.io
refs/heads/develop
scripts/figshare/__init__.py
12133432
emedinaa/contentbox
refs/heads/master
third_party/social/tests/backends/__init__.py
12133432
deployed/django
refs/heads/master
django/contrib/sessions/management/__init__.py
12133432
nvoron23/statsmodels
refs/heads/master
statsmodels/robust/tests/__init__.py
12133432
axbaretto/beam
refs/heads/master
sdks/python/.tox/lint/lib/python2.7/site-packages/pyasn1/type/base.py
20
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com> # License: http://pyasn1.sf.net/license.html # import sys from pyasn1.type import constraint, tagmap, tag from pyasn1 import error __all__ = ['Asn1Item', 'Asn1ItemBase', 'AbstractSimpleAsn1Item', 'AbstractConstructedAsn1Item'] class Asn1Item(object): pass class Asn1ItemBase(Asn1Item): #: Default :py:class:`~pyasn1.type.tag.TagSet` object representing #: ASN.1 tag(s) associated with this ASN.1 type. tagSet = tag.TagSet() #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` #: object imposing constraints on initialization values. subtypeSpec = constraint.ConstraintsIntersection() # Used for ambiguous ASN.1 types identification typeId = None def __init__(self, tagSet=None, subtypeSpec=None): if tagSet is None: self._tagSet = self.tagSet else: self._tagSet = tagSet if subtypeSpec is None: self._subtypeSpec = self.subtypeSpec else: self._subtypeSpec = subtypeSpec def _verifySubtypeSpec(self, value, idx=None): try: self._subtypeSpec(value, idx) except error.PyAsn1Error: c, i, t = sys.exc_info() raise c('%s at %s' % (i, self.__class__.__name__)) def getSubtypeSpec(self): return self._subtypeSpec def getTagSet(self): return self._tagSet def getEffectiveTagSet(self): return self._tagSet # used by untagged types def getTagMap(self): return tagmap.TagMap({self._tagSet: self}) def isSameTypeWith(self, other, matchTags=True, matchConstraints=True): """Examine |ASN.1| type for equality with other ASN.1 type. ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints (:py:mod:`~pyasn1.type.constraint`) are examined when carrying out ASN.1 types comparison. No Python inheritance relationship between PyASN1 objects is considered. Parameters ---------- other: a pyasn1 type object Class instance representing ASN.1 type. Returns ------- : :class:`bool` :class:`True` if *other* is |ASN.1| type, :class:`False` otherwise. 
""" return self is other or \ (not matchTags or self._tagSet == other.getTagSet()) and \ (not matchConstraints or self._subtypeSpec == other.getSubtypeSpec()) def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True): """Examine |ASN.1| type for subtype relationship with other ASN.1 type. ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints (:py:mod:`~pyasn1.type.constraint`) are examined when carrying out ASN.1 types comparison. No Python inheritance relationship between PyASN1 objects is considered. Parameters ---------- other: a pyasn1 type object Class instance representing ASN.1 type. Returns ------- : :class:`bool` :class:`True` if *other* is a subtype of |ASN.1| type, :class:`False` otherwise. """ return (not matchTags or self._tagSet.isSuperTagSetOf(other.getTagSet())) and \ (not matchConstraints or (self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec()))) @staticmethod def isNoValue(*values): for value in values: if value is not None and value is not noValue: return False return True class NoValue(object): """Create a singleton instance of NoValue class. NoValue object can be used as an initializer on PyASN1 type class instantiation to represent ASN.1 type rather than ASN.1 data value. No operations other than type comparison can be performed on a PyASN1 type object. 
""" skipMethods = ('__getattribute__', '__getattr__', '__setattr__', '__delattr__', '__class__', '__init__', '__del__', '__new__', '__repr__', '__qualname__', '__objclass__', 'im_class', '__sizeof__') _instance = None def __new__(cls): if cls._instance is None: def getPlug(name): def plug(self, *args, **kw): raise error.PyAsn1Error('Uninitialized ASN.1 value ("%s" attribute looked up)' % name) return plug op_names = [name for typ in (str, int, list, dict) for name in dir(typ) if name not in cls.skipMethods and name.startswith('__') and name.endswith('__') and callable(getattr(typ, name))] for name in set(op_names): setattr(cls, name, getPlug(name)) cls._instance = object.__new__(cls) return cls._instance def __getattr__(self, attr): if attr in self.skipMethods: raise AttributeError('attribute %s not present' % attr) raise error.PyAsn1Error('No value for "%s"' % attr) def __repr__(self): return '%s()' % self.__class__.__name__ noValue = NoValue() # Base class for "simple" ASN.1 objects. These are immutable. 
class AbstractSimpleAsn1Item(Asn1ItemBase): #: Default payload value defaultValue = noValue def __init__(self, value=noValue, tagSet=None, subtypeSpec=None): Asn1ItemBase.__init__(self, tagSet, subtypeSpec) if self.isNoValue(value): value = self.defaultValue if self.isNoValue(value): self.__hashedValue = value = noValue else: value = self.prettyIn(value) self._verifySubtypeSpec(value) self.__hashedValue = hash(value) self._value = value self._len = None def __repr__(self): r = [] if self._value is not self.defaultValue: r.append(self.prettyOut(self._value)) if self._tagSet is not self.tagSet: r.append('tagSet=%r' % (self._tagSet,)) if self._subtypeSpec is not self.subtypeSpec: r.append('subtypeSpec=%r' % (self._subtypeSpec,)) return '%s(%s)' % (self.__class__.__name__, ', '.join(r)) def __str__(self): return str(self._value) def __eq__(self, other): return self is other and True or self._value == other def __ne__(self, other): return self._value != other def __lt__(self, other): return self._value < other def __le__(self, other): return self._value <= other def __gt__(self, other): return self._value > other def __ge__(self, other): return self._value >= other if sys.version_info[0] <= 2: def __nonzero__(self): return bool(self._value) else: def __bool__(self): return bool(self._value) def __hash__(self): return self.__hashedValue is noValue and hash(noValue) or self.__hashedValue def hasValue(self): """Indicate if |ASN.1| object represents ASN.1 value or ASN.1 type. The PyASN1 type objects can only participate in types comparison and serve as a blueprint for serialization codecs to resolve ambiguous types. The PyASN1 value objects can additionally participate to most of built-in Python operations. Returns ------- : :class:`bool` :class:`True` if object is ASN.1 value, :class:`False` otherwise. """ return self._value is not noValue def clone(self, value=noValue, tagSet=None, subtypeSpec=None): """Create a copy of a |ASN.1| type or object. 
Any parameters to the *clone()* method will replace corresponding properties of the |ASN.1| object. Parameters ---------- value: :class:`tuple`, :class:`str` or |ASN.1| object Initialization value to pass to new ASN.1 object instead of inheriting one from the caller. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing ASN.1 tag(s) to use in new object instead of inheriting from the caller subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing ASN.1 subtype constraint(s) to use in new object instead of inheriting from the caller Returns ------- : new instance of |ASN.1| type/value """ if self.isNoValue(value): if self.isNoValue(tagSet, subtypeSpec): return self value = self._value if tagSet is None: tagSet = self._tagSet if subtypeSpec is None: subtypeSpec = self._subtypeSpec return self.__class__(value, tagSet, subtypeSpec) def subtype(self, value=noValue, implicitTag=None, explicitTag=None, subtypeSpec=None): """Create a copy of a |ASN.1| type or object. Any parameters to the *subtype()* method will be added to the corresponding properties of the |ASN.1| object. Parameters ---------- value: :class:`tuple`, :class:`str` or |ASN.1| object Initialization value to pass to new ASN.1 object instead of inheriting one from the caller. implicitTag: :py:class:`~pyasn1.type.tag.Tag` Implicitly apply given ASN.1 tag object to caller's :py:class:`~pyasn1.type.tag.TagSet`, then use the result as new object's ASN.1 tag(s). explicitTag: :py:class:`~pyasn1.type.tag.Tag` Explicitly apply given ASN.1 tag object to caller's :py:class:`~pyasn1.type.tag.TagSet`, then use the result as new object's ASN.1 tag(s). subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Add ASN.1 constraints object to one of the caller, then use the result as new object's ASN.1 constraints. 
Returns ------- : new instance of |ASN.1| type/value """ if self.isNoValue(value): value = self._value if implicitTag is not None: tagSet = self._tagSet.tagImplicitly(implicitTag) elif explicitTag is not None: tagSet = self._tagSet.tagExplicitly(explicitTag) else: tagSet = self._tagSet if subtypeSpec is None: subtypeSpec = self._subtypeSpec else: subtypeSpec = self._subtypeSpec + subtypeSpec return self.__class__(value, tagSet, subtypeSpec) def prettyIn(self, value): return value def prettyOut(self, value): return str(value) def prettyPrint(self, scope=0): """Provide human-friendly printable object representation. Returns ------- : :class:`str` human-friendly type and/or value representation. """ if self.hasValue(): return self.prettyOut(self._value) else: return '<no value>' # XXX Compatibility stub def prettyPrinter(self, scope=0): return self.prettyPrint(scope) # noinspection PyUnusedLocal def prettyPrintType(self, scope=0): return '%s -> %s' % (self.getTagSet(), self.__class__.__name__) # # Constructed types: # * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice # * ASN1 types and values are represened by Python class instances # * Value initialization is made for defaulted components only # * Primary method of component addressing is by-position. Data model for base # type is Python sequence. Additional type-specific addressing methods # may be implemented for particular types. # * SequenceOf and SetOf types do not implement any additional methods # * Sequence, Set and Choice types also implement by-identifier addressing # * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing # * Sequence and Set types may include optional and defaulted # components # * Constructed types hold a reference to component types used for value # verification and ordering. # * Component type is a scalar type for SequenceOf/SetOf types and a list # of types for Sequence/Set/Choice. # def setupComponent(): """Returns a sentinel value. 
Indicates to a constructed type to set up its inner component so that it can be referred to. This is useful in situation when you want to populate descendants of a constructed type what requires being able to refer to their parent types along the way. Example ------- >>> constructed['record'] = setupComponent() >>> constructed['record']['scalar'] = 42 """ return noValue class AbstractConstructedAsn1Item(Asn1ItemBase): #: If `True`, requires exact component type matching, #: otherwise subtype relation is only enforced strictConstraints = False def __init__(self, componentType=None, tagSet=None, subtypeSpec=None, sizeSpec=None): Asn1ItemBase.__init__(self, tagSet, subtypeSpec) if componentType is None: self._componentType = self.componentType else: self._componentType = componentType if sizeSpec is None: self._sizeSpec = self.sizeSpec else: self._sizeSpec = sizeSpec self._componentValues = [] self._componentValuesSet = 0 def __repr__(self): r = [] if self._componentType is not self.componentType: r.append('componentType=%r' % (self._componentType,)) if self._tagSet is not self.tagSet: r.append('tagSet=%r' % (self._tagSet,)) if self._subtypeSpec is not self.subtypeSpec: r.append('subtypeSpec=%r' % (self._subtypeSpec,)) r = '%s(%s)' % (self.__class__.__name__, ', '.join(r)) if self._componentValues: r += '.setComponents(%s)' % ', '.join([repr(x) for x in self._componentValues]) return r def __eq__(self, other): return self is other and True or self._componentValues == other def __ne__(self, other): return self._componentValues != other def __lt__(self, other): return self._componentValues < other def __le__(self, other): return self._componentValues <= other def __gt__(self, other): return self._componentValues > other def __ge__(self, other): return self._componentValues >= other if sys.version_info[0] <= 2: def __nonzero__(self): return bool(self._componentValues) else: def __bool__(self): return bool(self._componentValues) def getComponentTagMap(self): raise 
error.PyAsn1Error('Method not implemented') def _cloneComponentValues(self, myClone, cloneValueFlag): pass def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None, cloneValueFlag=None): """Create a copy of a |ASN.1| type or object. Any parameters to the *clone()* method will replace corresponding properties of the |ASN.1| object. Parameters ---------- tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 subtype constraint(s) sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 size constraint(s) Returns ------- : new instance of |ASN.1| type/value """ if tagSet is None: tagSet = self._tagSet if subtypeSpec is None: subtypeSpec = self._subtypeSpec if sizeSpec is None: sizeSpec = self._sizeSpec r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec) if cloneValueFlag: self._cloneComponentValues(r, cloneValueFlag) return r def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None, sizeSpec=None, cloneValueFlag=None): """Create a copy of a |ASN.1| type or object. Any parameters to the *subtype()* method will be added to the corresponding properties of the |ASN.1| object. 
Parameters ---------- tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 subtype constraint(s) sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 size constraint(s) Returns ------- : new instance of |ASN.1| type/value """ if implicitTag is not None: tagSet = self._tagSet.tagImplicitly(implicitTag) elif explicitTag is not None: tagSet = self._tagSet.tagExplicitly(explicitTag) else: tagSet = self._tagSet if subtypeSpec is None: subtypeSpec = self._subtypeSpec else: subtypeSpec = self._subtypeSpec + subtypeSpec if sizeSpec is None: sizeSpec = self._sizeSpec else: sizeSpec = sizeSpec + self._sizeSpec r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec) if cloneValueFlag: self._cloneComponentValues(r, cloneValueFlag) return r def _verifyComponent(self, idx, value): pass def verifySizeSpec(self): self._sizeSpec(self) def getComponentByPosition(self, idx): raise error.PyAsn1Error('Method not implemented') def setComponentByPosition(self, idx, value, verifyConstraints=True): raise error.PyAsn1Error('Method not implemented') def setComponents(self, *args, **kwargs): for idx, value in enumerate(args): self[idx] = value for k in kwargs: self[k] = kwargs[k] return self def getComponentType(self): return self._componentType def setDefaultComponents(self): pass def __getitem__(self, idx): return self.getComponentByPosition(idx) def __setitem__(self, idx, value): self.setComponentByPosition(idx, value) def __len__(self): return len(self._componentValues) def clear(self): self._componentValues = [] self._componentValuesSet = 0
open-synergy/event
refs/heads/8.0
event_contact/__openerp__.py
1
# -*- coding: utf-8 -*- # © 2016 OpenSynergy Indonesia # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). { 'name': 'Event Contacts', 'version': '8.0.1.0.0', 'summary': 'Add contacts to event and event type', 'author': 'OpenSynergy Indonesia, ' 'Antiun Ingeniería S.L., ' 'Odoo Community Association (OCA)', 'website': 'https://opensynergy-indonesia.com', 'category': 'Marketing', 'depends': ['event'], 'data': [ 'views/event_event_view.xml', 'views/event_type_view.xml', ], 'installable': True, 'license': 'AGPL-3', }
radio-astro-tools/pvextractor
refs/heads/master
pvextractor/geometry/slices.py
9
import numpy as np

from .line_slices import extract_line_slice
from .poly_slices import extract_poly_slice


def extract_slice(cube, path, spacing=1.0, order=3, respect_nan=True,
                  wcs=None):
    """
    Given an array with shape (z, y, x), extract a (z, n) slice from a path
    with ``n`` segments.

    All units are in *pixels*

    .. note:: If there are NaNs in the cube, they will be treated as zeros
              when using spline interpolation.

    Parameters
    ----------
    cube : `numpy.ndarray`
        The (z, y, x) array to slice.
    path : `Path`
        The path along which to define the slice
    spacing : float
        The position resolution in the final slice
    order : int, optional
        Spline interpolation order when using line paths. Does not have
        any effect for polygon paths.
    respect_nan : bool, optional
        If set to `False`, NaN values are changed to zero before computing
        the slices.
    wcs : optional
        World coordinate system forwarded to ``path.sample_points`` /
        ``path.sample_polygons``.

    Returns
    -------
    slice : `numpy.ndarray`
        The slice
    """

    if not respect_nan:
        cube = np.nan_to_num(cube)

    # FIX: renamed the result variable from `slice`, which shadowed the
    # Python builtin of the same name.
    if path.width is None:
        # Zero-width path: sample discrete points and interpolate the cube
        # along the resulting line.
        x, y = path.sample_points(spacing=spacing, wcs=wcs)
        pvslice = extract_line_slice(cube, x, y, order=order)
    else:
        # Finite-width path: extract over sampled polygons instead.
        polygons = path.sample_polygons(spacing=spacing, wcs=wcs)
        pvslice = extract_poly_slice(cube, polygons)

    return pvslice
mquandalle/rethinkdb
refs/heads/next
external/v8_3.30.33.16/build/gyp/test/actions-multiple/gyptest-all.py
345
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies two actions can be attached to the same input files. """ import sys import TestGyp test = TestGyp.TestGyp() test.run_gyp('actions.gyp', chdir='src') test.relocate('src', 'relocate/src') # Test of fine-grained dependencies for generators that can build individual # files on demand. # In particular: # - TargetA depends on TargetB. # - TargetA and TargetB are 'none' type with actions attached. # - TargetA has multiple actions. # - An output from one of the actions in TargetA (not the first listed), # is requested as the build target. # Ensure that TargetB gets built. # # This sub-test can only be done with generators/build tools that can # be asked to build individual files rather than whole targets (make, ninja). if test.format in ['make', 'ninja']: # Select location of target based on generator. if test.format == 'make': target = 'multi2.txt' elif test.format == 'ninja': if sys.platform in ['win32', 'cygwin']: target = '..\\..\\multi2.txt' else: target = '../../multi2.txt' else: assert False test.build('actions.gyp', chdir='relocate/src', target=target) test.must_contain('relocate/src/multi2.txt', 'hello there') test.must_contain('relocate/src/multi_dep.txt', 'hello there') # Test that two actions can be attached to the same inputs. test.build('actions.gyp', test.ALL, chdir='relocate/src') test.must_contain('relocate/src/output1.txt', 'hello there') test.must_contain('relocate/src/output2.txt', 'hello there') test.must_contain('relocate/src/output3.txt', 'hello there') test.must_contain('relocate/src/output4.txt', 'hello there') # Test that process_outputs_as_sources works in conjuction with merged # actions. test.run_built_executable( 'multiple_action_source_filter', chdir='relocate/src', stdout=( '{\n' 'bar\n' 'car\n' 'dar\n' 'ear\n' '}\n' ), ) test.pass_test()
amisrs/angular-flask
refs/heads/master
angular_flask/lib/python2.7/site-packages/yaml/loader.py
672
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']

# NOTE: Python 2 implicit relative imports; on Python 3 these would have to
# be written as `from .reader import *` etc.
from reader import *
from scanner import *
from parser import *
from composer import *
from constructor import *
from resolver import *


class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor,
        BaseResolver):
    """Loader assembled from the YAML pipeline mixins, wired with
    BaseConstructor and BaseResolver.  The three loader classes differ only
    in which constructor/resolver pair they mix in."""

    def __init__(self, stream):
        # The mixins do not cooperate via super(), so each stage's
        # initializer must be invoked explicitly, in pipeline order.
        # Reader is the only stage that takes the input stream.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)


class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor,
        Resolver):
    """Loader wired with SafeConstructor and the standard Resolver."""

    def __init__(self, stream):
        # Same explicit, ordered initialization as BaseLoader.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)


class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Loader wired with the full Constructor and the standard Resolver.
    NOTE(review): per PyYAML's documentation the full Constructor can build
    arbitrary Python objects and is unsafe on untrusted input — confirm
    callers use SafeLoader for external documents."""

    def __init__(self, stream):
        # Same explicit, ordered initialization as BaseLoader.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
manazhao/tf_recsys
refs/heads/r1.0
tensorflow/contrib/framework/python/framework/experimental.py
179
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tensor utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import decorator_utils def _add_experimental_function_notice_to_docstring(doc): """Adds an experimental notice to a docstring for experimental functions.""" return decorator_utils.add_notice_to_docstring( doc, '', 'EXPERIMENTAL FUNCTION', '(experimental)', ['THIS FUNCTION IS EXPERIMENTAL. It may change or ' 'be removed at any time, and without warning.']) def experimental(func): """Decorator for marking functions or methods experimental. This decorator logs an experimental warning whenever the decorated function is called. It has the following format: <function> (from <module>) is experimental and may change or be removed at any time, and without warning. <function> will include the class name if it is a method. It also edits the docstring of the function: ' (experimental)' is appended to the first line of the docstring and a notice is prepended to the rest of the docstring. Args: func: A function or method to mark experimental. Returns: Decorated function or method. 
""" decorator_utils.validate_callable(func, 'experimental') @functools.wraps(func) def new_func(*args, **kwargs): logging.warning( '%s (from %s) is experimental and may change or be removed at ' 'any time, and without warning.', decorator_utils.get_qualified_name(func), func.__module__) return func(*args, **kwargs) new_func.__doc__ = _add_experimental_function_notice_to_docstring( func.__doc__) return new_func
project-magpie/enigma2-openpli
refs/heads/master
lib/python/Plugins/SystemPlugins/FastScan/__init__.py
12133432