content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
class Player():
def __init__(self, name, start=False, ai_switch=False):
self.ai = ai_switch
self.values = {0:.5}
self.name = name
self.turn = start
self.epsilon = 1
| class Player:
def __init__(self, name, start=False, ai_switch=False):
self.ai = ai_switch
self.values = {0: 0.5}
self.name = name
self.turn = start
self.epsilon = 1 |
CONFIG_FILE_PATH = 'TypeRacerStats/src/config.json'
ACCOUNTS_FILE_PATH = 'TypeRacerStats/src/accounts.json'
ALIASES_FILE_PATH = 'TypeRacerStats/src/commands.json'
PREFIXES_FILE_PATH = 'TypeRacerStats/src/prefixes.json'
SUPPORTERS_FILE_PATH = 'TypeRacerStats/src/supporter_colors.json'
UNIVERSES_FILE_PATH = 'TypeRacerStats/src/universes.txt'
ART_JSON = 'TypeRacerStats/src/art.json'
CLIPS_JSON = 'TypeRacerStats/src/clips.json'
DATABASE_PATH = 'TypeRacerStats/src/data/typeracer.db'
TEMPORARY_DATABASE_PATH = 'TypeRacerStats/src/data/temp.db'
TEXTS_FILE_PATH = 'TypeRacerStats/src/data/texts'
TOPTENS_JSON_FILE_PATH = 'TypeRacerStats/src/data/texts/top_ten.json'
TOPTENS_FILE_PATH = 'TypeRacerStats/src/data/texts/player_top_tens.json'
TEXTS_FILE_PATH_CSV = 'TypeRacerStats/src/data/texts/texts.csv'
MAINTAIN_PLAYERS_TXT = 'TypeRacerStats/src/data/maintain_players.txt'
CSS_COLORS = 'TypeRacerStats/src/css_colors.json'
CMAPS = 'TypeRacerStats/src/cmaps.json'
TYPERACER_RECORDS_JSON = 'TypeRacerStats/src/data/typeracer_records.json'
COUNTRY_CODES = 'TypeRacerStats/src/country_codes.json'
TEXTS_LENGTHS = 'TypeRacerStats/src/data/texts/texts.json'
TEXTS_LARGE = 'TypeRacerStats/src/data/texts/texts_large.json'
CHANGELOG = 'TypeRacerStats/src/changelog.json'
KEYMAPS_SVG = 'TypeRacerStats/src/keymap_svg.txt'
BLANK_KEYMAP = 'TypeRacerStats/src/keymap_template.json'
| config_file_path = 'TypeRacerStats/src/config.json'
accounts_file_path = 'TypeRacerStats/src/accounts.json'
aliases_file_path = 'TypeRacerStats/src/commands.json'
prefixes_file_path = 'TypeRacerStats/src/prefixes.json'
supporters_file_path = 'TypeRacerStats/src/supporter_colors.json'
universes_file_path = 'TypeRacerStats/src/universes.txt'
art_json = 'TypeRacerStats/src/art.json'
clips_json = 'TypeRacerStats/src/clips.json'
database_path = 'TypeRacerStats/src/data/typeracer.db'
temporary_database_path = 'TypeRacerStats/src/data/temp.db'
texts_file_path = 'TypeRacerStats/src/data/texts'
toptens_json_file_path = 'TypeRacerStats/src/data/texts/top_ten.json'
toptens_file_path = 'TypeRacerStats/src/data/texts/player_top_tens.json'
texts_file_path_csv = 'TypeRacerStats/src/data/texts/texts.csv'
maintain_players_txt = 'TypeRacerStats/src/data/maintain_players.txt'
css_colors = 'TypeRacerStats/src/css_colors.json'
cmaps = 'TypeRacerStats/src/cmaps.json'
typeracer_records_json = 'TypeRacerStats/src/data/typeracer_records.json'
country_codes = 'TypeRacerStats/src/country_codes.json'
texts_lengths = 'TypeRacerStats/src/data/texts/texts.json'
texts_large = 'TypeRacerStats/src/data/texts/texts_large.json'
changelog = 'TypeRacerStats/src/changelog.json'
keymaps_svg = 'TypeRacerStats/src/keymap_svg.txt'
blank_keymap = 'TypeRacerStats/src/keymap_template.json' |
#
# PySNMP MIB module HP-ICF-VRRP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-VRRP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:23:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
ModuleIdentity, iso, Integer32, NotificationType, Counter64, Counter32, TimeTicks, Gauge32, Unsigned32, ObjectIdentity, Bits, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "Integer32", "NotificationType", "Counter64", "Counter32", "TimeTicks", "Gauge32", "Unsigned32", "ObjectIdentity", "Bits", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TruthValue, RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "TextualConvention", "DisplayString")
vrrpOperVrId, vrrpAssoIpAddrEntry, vrrpOperEntry = mibBuilder.importSymbols("VRRP-MIB", "vrrpOperVrId", "vrrpAssoIpAddrEntry", "vrrpOperEntry")
hpicfVrrpMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31))
hpicfVrrpMIB.setRevisions(('2012-11-15 00:00', '2013-06-12 00:00', '2012-02-22 00:00', '2010-10-20 00:00', '2010-07-28 00:00', '2009-05-19 00:00', '2008-02-20 00:00', '2007-12-12 00:00', '2007-08-22 00:00', '2005-07-14 00:00',))
if mibBuilder.loadTexts: hpicfVrrpMIB.setLastUpdated('201211150000Z')
if mibBuilder.loadTexts: hpicfVrrpMIB.setOrganization('HP Networking')
hpicfVrrpOperations = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1))
hpicfVrrpConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2))
hpicfVrrpAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfVrrpAdminStatus.setStatus('deprecated')
hpicfVrrpOperTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2), )
if mibBuilder.loadTexts: hpicfVrrpOperTable.setStatus('current')
hpicfVrrpOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1), )
vrrpOperEntry.registerAugmentions(("HP-ICF-VRRP-MIB", "hpicfVrrpOperEntry"))
hpicfVrrpOperEntry.setIndexNames(*vrrpOperEntry.getIndexNames())
if mibBuilder.loadTexts: hpicfVrrpOperEntry.setStatus('current')
hpicfVrrpVrMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("owner", 1), ("backup", 2), ("uninitialized", 3))).clone('uninitialized')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrMode.setStatus('current')
hpicfVrrpVrMasterPreempt = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrMasterPreempt.setStatus('current')
hpicfVrrpVrTransferControl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrTransferControl.setStatus('current')
hpicfVrrpVrPreemptDelayTime = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrPreemptDelayTime.setStatus('current')
hpicfVrrpVrControl = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("failback", 1), ("failover", 2), ("failoverWithMonitoring", 3), ("invalid", 4))).clone('invalid')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrControl.setStatus('current')
hpicfVrrpVrRespondToPing = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 6), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpVrRespondToPing.setStatus('current')
hpicfVrrpAssoIpAddrTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3), )
if mibBuilder.loadTexts: hpicfVrrpAssoIpAddrTable.setStatus('current')
hpicfVrrpAssoIpAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3, 1), )
vrrpAssoIpAddrEntry.registerAugmentions(("HP-ICF-VRRP-MIB", "hpicfVrrpAssoIpAddrEntry"))
hpicfVrrpAssoIpAddrEntry.setIndexNames(*vrrpAssoIpAddrEntry.getIndexNames())
if mibBuilder.loadTexts: hpicfVrrpAssoIpAddrEntry.setStatus('current')
hpicfVrrpAssoIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3, 1, 1), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpAssoIpMask.setStatus('current')
hpicfVrrpTrackTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5), )
if mibBuilder.loadTexts: hpicfVrrpTrackTable.setStatus('current')
hpicfVrrpTrackEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "VRRP-MIB", "vrrpOperVrId"), (0, "HP-ICF-VRRP-MIB", "hpicfVrrpVrTrackType"), (0, "HP-ICF-VRRP-MIB", "hpicfVrrpVrTrackEntity"))
if mibBuilder.loadTexts: hpicfVrrpTrackEntry.setStatus('current')
hpicfVrrpVrTrackType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("port", 1), ("trunk", 2), ("vlan", 3))))
if mibBuilder.loadTexts: hpicfVrrpVrTrackType.setStatus('current')
hpicfVrrpVrTrackEntity = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 255)))
if mibBuilder.loadTexts: hpicfVrrpVrTrackEntity.setStatus('current')
hpicfVrrpTrackRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfVrrpTrackRowStatus.setStatus('current')
hpicfVrrpTrackState = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("down", 0), ("up", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfVrrpTrackState.setStatus('current')
hpicfVrrpStatsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6), )
if mibBuilder.loadTexts: hpicfVrrpStatsTable.setStatus('current')
hpicfVrrpRespondToPing = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 7), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfVrrpRespondToPing.setStatus('deprecated')
hpicfVrrpRemoveConfig = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfVrrpRemoveConfig.setStatus('deprecated')
hpicfVrrpNonstop = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 9), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfVrrpNonstop.setStatus('deprecated')
hpicfVrrpStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6, 1), )
vrrpOperEntry.registerAugmentions(("HP-ICF-VRRP-MIB", "hpicfVrrpStatsEntry"))
hpicfVrrpStatsEntry.setIndexNames(*vrrpOperEntry.getIndexNames())
if mibBuilder.loadTexts: hpicfVrrpStatsEntry.setStatus('current')
hpicfVrrpStatsNearFailovers = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfVrrpStatsNearFailovers.setStatus('current')
hpicfVrrpMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1))
hpicfVrrpMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2))
hpicfVrrpMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 1)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance = hpicfVrrpMIBCompliance.setStatus('deprecated')
hpicfVrrpMIBCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 2)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance1 = hpicfVrrpMIBCompliance1.setStatus('deprecated')
hpicfVrrpMIBCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 3)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpVrPingGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrPingGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance2 = hpicfVrrpMIBCompliance2.setStatus('current')
hpicfVrrpMIBCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 4)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpNonstopGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpNonstopGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance3 = hpicfVrrpMIBCompliance3.setStatus('deprecated')
hpicfVrrpMIBCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 5)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance4 = hpicfVrrpMIBCompliance4.setStatus('deprecated')
hpicfVrrpMIBCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 6)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup1"), ("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance5 = hpicfVrrpMIBCompliance5.setStatus('deprecated')
hpicfVrrpMIBCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 7)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup1"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup1"), ("HP-ICF-VRRP-MIB", "hpicfVrrpOperGroup1"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance6 = hpicfVrrpMIBCompliance6.setStatus('current')
hpicfVrrpMIBCompliance7 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 8)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpOperationsGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpMIBCompliance7 = hpicfVrrpMIBCompliance7.setStatus('current')
hpicfVrrpOperGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 1)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpAdminStatus"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrMode"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrMasterPreempt"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrTransferControl"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrPreemptDelayTime"), ("HP-ICF-VRRP-MIB", "hpicfVrrpAssoIpMask"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpOperGroup = hpicfVrrpOperGroup.setStatus('deprecated')
hpicfVrrpTrackGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 2)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpTrackRowStatus"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrControl"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpTrackGroup = hpicfVrrpTrackGroup.setStatus('deprecated')
hpicfVrrpVrPingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 3)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpVrRespondToPing"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpVrPingGroup = hpicfVrrpVrPingGroup.setStatus('current')
hpicfVrrpNonstopGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 4)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpNonstop"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpNonstopGroup = hpicfVrrpNonstopGroup.setStatus('deprecated')
hpicfVrrpOperationsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 5)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpRespondToPing"), ("HP-ICF-VRRP-MIB", "hpicfVrrpRemoveConfig"), ("HP-ICF-VRRP-MIB", "hpicfVrrpStatsNearFailovers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpOperationsGroup = hpicfVrrpOperationsGroup.setStatus('deprecated')
hpicfVrrpTrackGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 6)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpTrackRowStatus"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrControl"), ("HP-ICF-VRRP-MIB", "hpicfVrrpTrackState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpTrackGroup1 = hpicfVrrpTrackGroup1.setStatus('current')
hpicfVrrpOperGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 7)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpVrMode"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrMasterPreempt"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrTransferControl"), ("HP-ICF-VRRP-MIB", "hpicfVrrpVrPreemptDelayTime"), ("HP-ICF-VRRP-MIB", "hpicfVrrpAssoIpMask"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpOperGroup1 = hpicfVrrpOperGroup1.setStatus('current')
hpicfVrrpOperationsGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 8)).setObjects(("HP-ICF-VRRP-MIB", "hpicfVrrpStatsNearFailovers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfVrrpOperationsGroup1 = hpicfVrrpOperationsGroup1.setStatus('current')
mibBuilder.exportSymbols("HP-ICF-VRRP-MIB", hpicfVrrpVrControl=hpicfVrrpVrControl, hpicfVrrpTrackEntry=hpicfVrrpTrackEntry, hpicfVrrpVrPreemptDelayTime=hpicfVrrpVrPreemptDelayTime, hpicfVrrpTrackTable=hpicfVrrpTrackTable, hpicfVrrpVrTrackEntity=hpicfVrrpVrTrackEntity, hpicfVrrpVrTrackType=hpicfVrrpVrTrackType, hpicfVrrpAssoIpAddrTable=hpicfVrrpAssoIpAddrTable, hpicfVrrpAdminStatus=hpicfVrrpAdminStatus, hpicfVrrpAssoIpAddrEntry=hpicfVrrpAssoIpAddrEntry, hpicfVrrpVrRespondToPing=hpicfVrrpVrRespondToPing, hpicfVrrpStatsEntry=hpicfVrrpStatsEntry, hpicfVrrpRemoveConfig=hpicfVrrpRemoveConfig, hpicfVrrpOperEntry=hpicfVrrpOperEntry, hpicfVrrpOperations=hpicfVrrpOperations, hpicfVrrpMIBCompliance=hpicfVrrpMIBCompliance, hpicfVrrpTrackGroup=hpicfVrrpTrackGroup, hpicfVrrpNonstop=hpicfVrrpNonstop, PYSNMP_MODULE_ID=hpicfVrrpMIB, hpicfVrrpMIBCompliance1=hpicfVrrpMIBCompliance1, hpicfVrrpVrMasterPreempt=hpicfVrrpVrMasterPreempt, hpicfVrrpMIBCompliance5=hpicfVrrpMIBCompliance5, hpicfVrrpNonstopGroup=hpicfVrrpNonstopGroup, hpicfVrrpMIBCompliance3=hpicfVrrpMIBCompliance3, hpicfVrrpTrackGroup1=hpicfVrrpTrackGroup1, hpicfVrrpVrPingGroup=hpicfVrrpVrPingGroup, hpicfVrrpVrMode=hpicfVrrpVrMode, hpicfVrrpOperationsGroup=hpicfVrrpOperationsGroup, hpicfVrrpTrackRowStatus=hpicfVrrpTrackRowStatus, hpicfVrrpConformance=hpicfVrrpConformance, hpicfVrrpMIB=hpicfVrrpMIB, hpicfVrrpMIBCompliance6=hpicfVrrpMIBCompliance6, hpicfVrrpOperGroup1=hpicfVrrpOperGroup1, hpicfVrrpVrTransferControl=hpicfVrrpVrTransferControl, hpicfVrrpMIBCompliances=hpicfVrrpMIBCompliances, hpicfVrrpRespondToPing=hpicfVrrpRespondToPing, hpicfVrrpAssoIpMask=hpicfVrrpAssoIpMask, hpicfVrrpMIBCompliance7=hpicfVrrpMIBCompliance7, hpicfVrrpOperationsGroup1=hpicfVrrpOperationsGroup1, hpicfVrrpStatsNearFailovers=hpicfVrrpStatsNearFailovers, hpicfVrrpStatsTable=hpicfVrrpStatsTable, hpicfVrrpOperGroup=hpicfVrrpOperGroup, hpicfVrrpMIBCompliance4=hpicfVrrpMIBCompliance4, hpicfVrrpMIBGroups=hpicfVrrpMIBGroups, 
hpicfVrrpMIBCompliance2=hpicfVrrpMIBCompliance2, hpicfVrrpOperTable=hpicfVrrpOperTable, hpicfVrrpTrackState=hpicfVrrpTrackState)
| (octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_range_constraint, value_size_constraint, constraints_union, single_value_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueRangeConstraint', 'ValueSizeConstraint', 'ConstraintsUnion', 'SingleValueConstraint', 'ConstraintsIntersection')
(hp_switch,) = mibBuilder.importSymbols('HP-ICF-OID', 'hpSwitch')
(if_index,) = mibBuilder.importSymbols('IF-MIB', 'ifIndex')
(snmp_admin_string,) = mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpAdminString')
(notification_group, module_compliance, object_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance', 'ObjectGroup')
(module_identity, iso, integer32, notification_type, counter64, counter32, time_ticks, gauge32, unsigned32, object_identity, bits, ip_address, mib_identifier, mib_scalar, mib_table, mib_table_row, mib_table_column) = mibBuilder.importSymbols('SNMPv2-SMI', 'ModuleIdentity', 'iso', 'Integer32', 'NotificationType', 'Counter64', 'Counter32', 'TimeTicks', 'Gauge32', 'Unsigned32', 'ObjectIdentity', 'Bits', 'IpAddress', 'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn')
(truth_value, row_status, textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TruthValue', 'RowStatus', 'TextualConvention', 'DisplayString')
(vrrp_oper_vr_id, vrrp_asso_ip_addr_entry, vrrp_oper_entry) = mibBuilder.importSymbols('VRRP-MIB', 'vrrpOperVrId', 'vrrpAssoIpAddrEntry', 'vrrpOperEntry')
hpicf_vrrp_mib = module_identity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31))
hpicfVrrpMIB.setRevisions(('2012-11-15 00:00', '2013-06-12 00:00', '2012-02-22 00:00', '2010-10-20 00:00', '2010-07-28 00:00', '2009-05-19 00:00', '2008-02-20 00:00', '2007-12-12 00:00', '2007-08-22 00:00', '2005-07-14 00:00'))
if mibBuilder.loadTexts:
hpicfVrrpMIB.setLastUpdated('201211150000Z')
if mibBuilder.loadTexts:
hpicfVrrpMIB.setOrganization('HP Networking')
hpicf_vrrp_operations = mib_identifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1))
hpicf_vrrp_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2))
hpicf_vrrp_admin_status = mib_scalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 1), truth_value().clone('false')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hpicfVrrpAdminStatus.setStatus('deprecated')
hpicf_vrrp_oper_table = mib_table((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2))
if mibBuilder.loadTexts:
hpicfVrrpOperTable.setStatus('current')
hpicf_vrrp_oper_entry = mib_table_row((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1))
vrrpOperEntry.registerAugmentions(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperEntry'))
hpicfVrrpOperEntry.setIndexNames(*vrrpOperEntry.getIndexNames())
if mibBuilder.loadTexts:
hpicfVrrpOperEntry.setStatus('current')
hpicf_vrrp_vr_mode = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 1), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('owner', 1), ('backup', 2), ('uninitialized', 3))).clone('uninitialized')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrMode.setStatus('current')
hpicf_vrrp_vr_master_preempt = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 2), truth_value().clone('false')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrMasterPreempt.setStatus('current')
hpicf_vrrp_vr_transfer_control = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 3), truth_value().clone('false')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrTransferControl.setStatus('current')
hpicf_vrrp_vr_preempt_delay_time = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 4), integer32().subtype(subtypeSpec=value_range_constraint(0, 600))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrPreemptDelayTime.setStatus('current')
hpicf_vrrp_vr_control = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 5), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4))).clone(namedValues=named_values(('failback', 1), ('failover', 2), ('failoverWithMonitoring', 3), ('invalid', 4))).clone('invalid')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrControl.setStatus('current')
hpicf_vrrp_vr_respond_to_ping = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 2, 1, 6), truth_value().clone('true')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpVrRespondToPing.setStatus('current')
hpicf_vrrp_asso_ip_addr_table = mib_table((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3))
if mibBuilder.loadTexts:
hpicfVrrpAssoIpAddrTable.setStatus('current')
hpicf_vrrp_asso_ip_addr_entry = mib_table_row((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3, 1))
vrrpAssoIpAddrEntry.registerAugmentions(('HP-ICF-VRRP-MIB', 'hpicfVrrpAssoIpAddrEntry'))
hpicfVrrpAssoIpAddrEntry.setIndexNames(*vrrpAssoIpAddrEntry.getIndexNames())
if mibBuilder.loadTexts:
hpicfVrrpAssoIpAddrEntry.setStatus('current')
hpicf_vrrp_asso_ip_mask = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 3, 1, 1), ip_address().clone(hexValue='00000000')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpAssoIpMask.setStatus('current')
hpicf_vrrp_track_table = mib_table((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5))
if mibBuilder.loadTexts:
hpicfVrrpTrackTable.setStatus('current')
hpicf_vrrp_track_entry = mib_table_row((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1)).setIndexNames((0, 'IF-MIB', 'ifIndex'), (0, 'VRRP-MIB', 'vrrpOperVrId'), (0, 'HP-ICF-VRRP-MIB', 'hpicfVrrpVrTrackType'), (0, 'HP-ICF-VRRP-MIB', 'hpicfVrrpVrTrackEntity'))
if mibBuilder.loadTexts:
hpicfVrrpTrackEntry.setStatus('current')
hpicf_vrrp_vr_track_type = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 1), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('port', 1), ('trunk', 2), ('vlan', 3))))
if mibBuilder.loadTexts:
hpicfVrrpVrTrackType.setStatus('current')
hpicf_vrrp_vr_track_entity = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 2), snmp_admin_string().subtype(subtypeSpec=value_size_constraint(1, 255)))
if mibBuilder.loadTexts:
hpicfVrrpVrTrackEntity.setStatus('current')
hpicf_vrrp_track_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 3), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hpicfVrrpTrackRowStatus.setStatus('current')
hpicf_vrrp_track_state = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 5, 1, 4), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('down', 0), ('up', 1)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hpicfVrrpTrackState.setStatus('current')
hpicf_vrrp_stats_table = mib_table((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6))
if mibBuilder.loadTexts:
hpicfVrrpStatsTable.setStatus('current')
hpicf_vrrp_respond_to_ping = mib_scalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 7), truth_value().clone('false')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hpicfVrrpRespondToPing.setStatus('deprecated')
hpicf_vrrp_remove_config = mib_scalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 8), truth_value().clone('false')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hpicfVrrpRemoveConfig.setStatus('deprecated')
hpicf_vrrp_nonstop = mib_scalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 9), truth_value().clone('false')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hpicfVrrpNonstop.setStatus('deprecated')
hpicf_vrrp_stats_entry = mib_table_row((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6, 1))
vrrpOperEntry.registerAugmentions(('HP-ICF-VRRP-MIB', 'hpicfVrrpStatsEntry'))
hpicfVrrpStatsEntry.setIndexNames(*vrrpOperEntry.getIndexNames())
if mibBuilder.loadTexts:
hpicfVrrpStatsEntry.setStatus('current')
hpicf_vrrp_stats_near_failovers = mib_table_column((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 1, 6, 1, 1), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hpicfVrrpStatsNearFailovers.setStatus('current')
hpicf_vrrp_mib_compliances = mib_identifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1))
hpicf_vrrp_mib_groups = mib_identifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2))
hpicf_vrrp_mib_compliance = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 1)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance = hpicfVrrpMIBCompliance.setStatus('deprecated')
hpicf_vrrp_mib_compliance1 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 2)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance1 = hpicfVrrpMIBCompliance1.setStatus('deprecated')
hpicf_vrrp_mib_compliance2 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 3)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpVrPingGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrPingGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance2 = hpicfVrrpMIBCompliance2.setStatus('current')
hpicf_vrrp_mib_compliance3 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 4)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpNonstopGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpNonstopGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance3 = hpicfVrrpMIBCompliance3.setStatus('deprecated')
hpicf_vrrp_mib_compliance4 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 5)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperationsGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance4 = hpicfVrrpMIBCompliance4.setStatus('deprecated')
hpicf_vrrp_mib_compliance5 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 6)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup1'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup1'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance5 = hpicfVrrpMIBCompliance5.setStatus('deprecated')
hpicf_vrrp_mib_compliance6 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 7)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup1'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup1'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpOperGroup1'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackGroup1'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance6 = hpicfVrrpMIBCompliance6.setStatus('current')
hpicf_vrrp_mib_compliance7 = module_compliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 1, 8)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpOperationsGroup1'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_mib_compliance7 = hpicfVrrpMIBCompliance7.setStatus('current')
hpicf_vrrp_oper_group = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 1)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpAdminStatus'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrMode'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrMasterPreempt'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrTransferControl'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrPreemptDelayTime'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpAssoIpMask'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_oper_group = hpicfVrrpOperGroup.setStatus('deprecated')
hpicf_vrrp_track_group = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 2)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackRowStatus'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrControl'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_track_group = hpicfVrrpTrackGroup.setStatus('deprecated')
hpicf_vrrp_vr_ping_group = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 3)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpVrRespondToPing'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_vr_ping_group = hpicfVrrpVrPingGroup.setStatus('current')
hpicf_vrrp_nonstop_group = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 4)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpNonstop'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_nonstop_group = hpicfVrrpNonstopGroup.setStatus('deprecated')
hpicf_vrrp_operations_group = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 5)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpRespondToPing'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpRemoveConfig'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpStatsNearFailovers'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_operations_group = hpicfVrrpOperationsGroup.setStatus('deprecated')
hpicf_vrrp_track_group1 = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 6)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackRowStatus'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrControl'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpTrackState'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_track_group1 = hpicfVrrpTrackGroup1.setStatus('current')
hpicf_vrrp_oper_group1 = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 7)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpVrMode'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrMasterPreempt'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrTransferControl'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpVrPreemptDelayTime'), ('HP-ICF-VRRP-MIB', 'hpicfVrrpAssoIpMask'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_oper_group1 = hpicfVrrpOperGroup1.setStatus('current')
hpicf_vrrp_operations_group1 = object_group((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 31, 2, 2, 8)).setObjects(('HP-ICF-VRRP-MIB', 'hpicfVrrpStatsNearFailovers'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicf_vrrp_operations_group1 = hpicfVrrpOperationsGroup1.setStatus('current')
mibBuilder.exportSymbols('HP-ICF-VRRP-MIB', hpicfVrrpVrControl=hpicfVrrpVrControl, hpicfVrrpTrackEntry=hpicfVrrpTrackEntry, hpicfVrrpVrPreemptDelayTime=hpicfVrrpVrPreemptDelayTime, hpicfVrrpTrackTable=hpicfVrrpTrackTable, hpicfVrrpVrTrackEntity=hpicfVrrpVrTrackEntity, hpicfVrrpVrTrackType=hpicfVrrpVrTrackType, hpicfVrrpAssoIpAddrTable=hpicfVrrpAssoIpAddrTable, hpicfVrrpAdminStatus=hpicfVrrpAdminStatus, hpicfVrrpAssoIpAddrEntry=hpicfVrrpAssoIpAddrEntry, hpicfVrrpVrRespondToPing=hpicfVrrpVrRespondToPing, hpicfVrrpStatsEntry=hpicfVrrpStatsEntry, hpicfVrrpRemoveConfig=hpicfVrrpRemoveConfig, hpicfVrrpOperEntry=hpicfVrrpOperEntry, hpicfVrrpOperations=hpicfVrrpOperations, hpicfVrrpMIBCompliance=hpicfVrrpMIBCompliance, hpicfVrrpTrackGroup=hpicfVrrpTrackGroup, hpicfVrrpNonstop=hpicfVrrpNonstop, PYSNMP_MODULE_ID=hpicfVrrpMIB, hpicfVrrpMIBCompliance1=hpicfVrrpMIBCompliance1, hpicfVrrpVrMasterPreempt=hpicfVrrpVrMasterPreempt, hpicfVrrpMIBCompliance5=hpicfVrrpMIBCompliance5, hpicfVrrpNonstopGroup=hpicfVrrpNonstopGroup, hpicfVrrpMIBCompliance3=hpicfVrrpMIBCompliance3, hpicfVrrpTrackGroup1=hpicfVrrpTrackGroup1, hpicfVrrpVrPingGroup=hpicfVrrpVrPingGroup, hpicfVrrpVrMode=hpicfVrrpVrMode, hpicfVrrpOperationsGroup=hpicfVrrpOperationsGroup, hpicfVrrpTrackRowStatus=hpicfVrrpTrackRowStatus, hpicfVrrpConformance=hpicfVrrpConformance, hpicfVrrpMIB=hpicfVrrpMIB, hpicfVrrpMIBCompliance6=hpicfVrrpMIBCompliance6, hpicfVrrpOperGroup1=hpicfVrrpOperGroup1, hpicfVrrpVrTransferControl=hpicfVrrpVrTransferControl, hpicfVrrpMIBCompliances=hpicfVrrpMIBCompliances, hpicfVrrpRespondToPing=hpicfVrrpRespondToPing, hpicfVrrpAssoIpMask=hpicfVrrpAssoIpMask, hpicfVrrpMIBCompliance7=hpicfVrrpMIBCompliance7, hpicfVrrpOperationsGroup1=hpicfVrrpOperationsGroup1, hpicfVrrpStatsNearFailovers=hpicfVrrpStatsNearFailovers, hpicfVrrpStatsTable=hpicfVrrpStatsTable, hpicfVrrpOperGroup=hpicfVrrpOperGroup, hpicfVrrpMIBCompliance4=hpicfVrrpMIBCompliance4, hpicfVrrpMIBGroups=hpicfVrrpMIBGroups, 
hpicfVrrpMIBCompliance2=hpicfVrrpMIBCompliance2, hpicfVrrpOperTable=hpicfVrrpOperTable, hpicfVrrpTrackState=hpicfVrrpTrackState) |
def pytest_addoption(parser):
parser.addoption('--integration_tests', action='store_true', dest="integration_tests",
default=False, help="enable integration tests")
def pytest_configure(config):
if not config.option.integration_tests:
setattr(config.option, 'markexpr', 'not integration_tests')
| def pytest_addoption(parser):
parser.addoption('--integration_tests', action='store_true', dest='integration_tests', default=False, help='enable integration tests')
def pytest_configure(config):
if not config.option.integration_tests:
setattr(config.option, 'markexpr', 'not integration_tests') |
class CountdownCancelAll:
def __init__(self):
self.symbol = ""
self.countdownTime = 0
@staticmethod
def json_parse(json_data):
result = CountdownCancelAll()
result.symbol = json_data.get_string("symbol")
result.countdownTime = json_data.get_int("countdownTime")
return result
| class Countdowncancelall:
def __init__(self):
self.symbol = ''
self.countdownTime = 0
@staticmethod
def json_parse(json_data):
result = countdown_cancel_all()
result.symbol = json_data.get_string('symbol')
result.countdownTime = json_data.get_int('countdownTime')
return result |
"""Contains all variable for custom scripts"""
ENVS = {
"staging": {
"app": "sparte-staging",
"region": "osc-fr1",
},
"prod": {
"app": "sparte",
"region": "osc-secnum-fr1",
},
}
| """Contains all variable for custom scripts"""
envs = {'staging': {'app': 'sparte-staging', 'region': 'osc-fr1'}, 'prod': {'app': 'sparte', 'region': 'osc-secnum-fr1'}} |
def fn1(a,b):
print("Subtraction=",a-b)
def fn2(c):
print(c) | def fn1(a, b):
print('Subtraction=', a - b)
def fn2(c):
print(c) |
#taking values through keyboard
a,b=[int(i) for i in input('enter two numbers:').split(',')]
if(a>b):
print(a,'is big')
elif(a<b):
print(b,'is big')
else:
print('both are equal')
#taking values directly
a=5
b=3
if(a>b):
print(a,'is big')
elif(b>a):
print(b,'is big')
else:
print('both are equal') | (a, b) = [int(i) for i in input('enter two numbers:').split(',')]
if a > b:
print(a, 'is big')
elif a < b:
print(b, 'is big')
else:
print('both are equal')
a = 5
b = 3
if a > b:
print(a, 'is big')
elif b > a:
print(b, 'is big')
else:
print('both are equal') |
"""This file holds the payload yaml validator/definition template."""
VALIDATOR = {
"specification": {
"type": dict,
"required": True,
"childs": {
"payload": {
"type": str,
"required": True,
"allowed": "^(cmd)$",
"childs": {},
},
"type": {
"type": str,
"required": True,
"allowed": "^(revshell|bindshell)$",
"childs": {},
},
"version": {
"type": str,
"required": True,
"allowed": "^([0-9]\\.[0-9]\\.[0-9])$",
"childs": {},
},
},
},
"items": {
"type": list,
"required": True,
"childs": {
"name": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"desc": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"info": {
"type": list,
"required": True,
"childs": {},
},
"rating": {
"type": int,
"required": True,
"allowed": "^([0-9])$",
"childs": {},
},
"meta": {
"type": dict,
"required": True,
"childs": {
"author": {
"type": str,
"required": True,
"childs": {},
},
"editors": {
"type": list,
"required": True,
"childs": {},
},
"created": {
"type": str,
"required": True,
"allowed": "^([0-9]{4}-[0-9]{2}-[0-9]{2})$",
"childs": {},
},
"modified": {
"type": str,
"required": True,
"allowed": "^([0-9]{4}-[0-9]{2}-[0-9]{2})$",
"childs": {},
},
"version": {
"type": str,
"required": True,
"allowed": "^([0-9]\\.[0-9]\\.[0-9])$",
"childs": {},
},
},
},
"cmd": {
"type": dict,
"required": True,
"childs": {
"executable": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"requires": {
"type": dict,
"required": True,
"childs": {
"commands": {
"type": list,
"required": True,
"childs": {},
},
"shell_env": {
"type": list,
"required": True,
"childs": {},
},
"os": {
"type": list,
"required": True,
"childs": {},
},
},
},
},
},
"revshell": {
"type": dict,
"required": True,
"childs": {
"proto": {
"type": str,
"required": True,
"allowed": "^(tcp|udp)$",
"childs": {},
},
"shell": {
"type": str,
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
"command": {
"type": (str, type(None)),
"required": True,
"allowed": "^(.+)$",
"childs": {},
},
},
},
"payload": {
"type": str,
"required": True,
"allowed": "(.*__ADDR__.*__PORT__.*|.*__PORT__.*__ADDR__.*)",
"childs": {},
},
},
},
}
| """This file holds the payload yaml validator/definition template."""
validator = {'specification': {'type': dict, 'required': True, 'childs': {'payload': {'type': str, 'required': True, 'allowed': '^(cmd)$', 'childs': {}}, 'type': {'type': str, 'required': True, 'allowed': '^(revshell|bindshell)$', 'childs': {}}, 'version': {'type': str, 'required': True, 'allowed': '^([0-9]\\.[0-9]\\.[0-9])$', 'childs': {}}}}, 'items': {'type': list, 'required': True, 'childs': {'name': {'type': str, 'required': True, 'allowed': '^(.+)$', 'childs': {}}, 'desc': {'type': str, 'required': True, 'allowed': '^(.+)$', 'childs': {}}, 'info': {'type': list, 'required': True, 'childs': {}}, 'rating': {'type': int, 'required': True, 'allowed': '^([0-9])$', 'childs': {}}, 'meta': {'type': dict, 'required': True, 'childs': {'author': {'type': str, 'required': True, 'childs': {}}, 'editors': {'type': list, 'required': True, 'childs': {}}, 'created': {'type': str, 'required': True, 'allowed': '^([0-9]{4}-[0-9]{2}-[0-9]{2})$', 'childs': {}}, 'modified': {'type': str, 'required': True, 'allowed': '^([0-9]{4}-[0-9]{2}-[0-9]{2})$', 'childs': {}}, 'version': {'type': str, 'required': True, 'allowed': '^([0-9]\\.[0-9]\\.[0-9])$', 'childs': {}}}}, 'cmd': {'type': dict, 'required': True, 'childs': {'executable': {'type': str, 'required': True, 'allowed': '^(.+)$', 'childs': {}}, 'requires': {'type': dict, 'required': True, 'childs': {'commands': {'type': list, 'required': True, 'childs': {}}, 'shell_env': {'type': list, 'required': True, 'childs': {}}, 'os': {'type': list, 'required': True, 'childs': {}}}}}}, 'revshell': {'type': dict, 'required': True, 'childs': {'proto': {'type': str, 'required': True, 'allowed': '^(tcp|udp)$', 'childs': {}}, 'shell': {'type': str, 'required': True, 'allowed': '^(.+)$', 'childs': {}}, 'command': {'type': (str, type(None)), 'required': True, 'allowed': '^(.+)$', 'childs': {}}}}, 'payload': {'type': str, 'required': True, 'allowed': '(.*__ADDR__.*__PORT__.*|.*__PORT__.*__ADDR__.*)', 'childs': {}}}}} |
# Define a python class to execute sequencial action during a SOT execution.
#
# Examples would be:
# attime(1800,lambda: sigset(sot.damping,0.9))
# attime(1920,lambda: sigset(sot.damping,0.1))
# attime(1920,lambda: pop(tw))
# attime(850,lambda: sigset(taskSupportSmall.controlGain,0.01))
# @attime(400)
# def action400():
# print 'toto'
#
class attimeAlways:
None
ALWAYS = attimeAlways()
class attimeStop:
None
STOP = attimeStop()
class Calendar:
def __init__(self):
self.events = dict()
self.ping = list()
# self.periodic=list()
def __repr__(self):
res = ''
for iter, funpairs in sorted(self.events.iteritems()):
res += str(iter) + ": \n"
for funpair in funpairs:
if funpair[1] == '':
res += funpair[0] + '\n'
else:
res += str(funpair[1]) + '\n'
return res
def stop(self, *args):
self.registerEvents(STOP, *args)
def registerEvent(self, iter, pairfundoc):
# if iter==ALWAYS:
# self.periodic.append(pairfundoc)
# return
if iter == STOP:
self.events[ALWAYS].remove(pairfundoc)
if iter not in self.events.keys():
self.events[iter] = list()
self.events[iter].append(pairfundoc)
def registerEvents(self, iter, *funs):
'''
3 entry types are possible: 1. only the functor. 2. a pair
(functor,doc). 3. a list of pairs (functor,doc).
'''
if len(funs) == 2 and callable(funs[0]) and isinstance(funs[1], str):
self.registerEvent(iter, (funs[0], funs[1]))
else:
for fun in funs:
if isinstance(fun, tuple):
self.registerEvent(iter, fun)
else: # assert iscallable(fun)
if 'functor' in fun.__dict__:
self.registerEvent(iter,
(fun.functor, fun.functor.__doc__))
else:
self.registerEvent(iter, (fun, fun.__doc__))
def addPing(self, f):
self.ping.append(f)
def callPing(self):
for f in self.ping:
f()
def run(self, iter, *args):
if ALWAYS in self.events:
for fun, doc in self.events[ALWAYS]:
fun(*args)
if iter in self.events:
self.callPing()
for fun, doc in self.events[iter]:
intro = "At time " + str(iter) + ": "
if doc is not None:
print(intro, doc)
else:
if fun.__doc__ is not None:
print(intro, fun.__doc__)
else:
print(intro, "Runing ", fun)
fun(*args)
def __call__(self, iterarg, *funs):
if len(funs) == 0:
return self.generatorDecorator(iterarg)
else:
self.registerEvents(iterarg, *funs)
def generatorDecorator(self, iterarg):
"""
This next calling pattern is a little bit strange. Use it to decorate
a function definition: @attime(30) def run30(): ...
"""
class calendarDeco:
iterRef = iterarg
calendarRef = self
fun = None
def __init__(selfdeco, functer):
if functer.__doc__ is None:
functer.__doc__ = "No doc fun"
if len(functer.__doc__) > 0:
selfdeco.__doc__ = functer.__doc__
selfdeco.__doc__ += " (will be run at time " + str(
selfdeco.iterRef) + ")"
selfdeco.fun = functer
selfdeco.calendarRef.registerEvents(selfdeco.iterRef, functer,
functer.__doc__)
def __call__(selfdeco, *args):
selfdeco.fun(*args)
return calendarDeco
def fastForward(self, t):
for i in range(t + 1):
self.run(i)
attime = Calendar()
sigset = (lambda s, v: s.__class__.value.__set__(s, v))
refset = (lambda mt, v: mt.__class__.ref.__set__(mt, v))
| class Attimealways:
None
always = attime_always()
class Attimestop:
None
stop = attime_stop()
class Calendar:
def __init__(self):
self.events = dict()
self.ping = list()
def __repr__(self):
res = ''
for (iter, funpairs) in sorted(self.events.iteritems()):
res += str(iter) + ': \n'
for funpair in funpairs:
if funpair[1] == '':
res += funpair[0] + '\n'
else:
res += str(funpair[1]) + '\n'
return res
def stop(self, *args):
self.registerEvents(STOP, *args)
def register_event(self, iter, pairfundoc):
if iter == STOP:
self.events[ALWAYS].remove(pairfundoc)
if iter not in self.events.keys():
self.events[iter] = list()
self.events[iter].append(pairfundoc)
def register_events(self, iter, *funs):
"""
3 entry types are possible: 1. only the functor. 2. a pair
(functor,doc). 3. a list of pairs (functor,doc).
"""
if len(funs) == 2 and callable(funs[0]) and isinstance(funs[1], str):
self.registerEvent(iter, (funs[0], funs[1]))
else:
for fun in funs:
if isinstance(fun, tuple):
self.registerEvent(iter, fun)
elif 'functor' in fun.__dict__:
self.registerEvent(iter, (fun.functor, fun.functor.__doc__))
else:
self.registerEvent(iter, (fun, fun.__doc__))
def add_ping(self, f):
self.ping.append(f)
def call_ping(self):
for f in self.ping:
f()
def run(self, iter, *args):
if ALWAYS in self.events:
for (fun, doc) in self.events[ALWAYS]:
fun(*args)
if iter in self.events:
self.callPing()
for (fun, doc) in self.events[iter]:
intro = 'At time ' + str(iter) + ': '
if doc is not None:
print(intro, doc)
elif fun.__doc__ is not None:
print(intro, fun.__doc__)
else:
print(intro, 'Runing ', fun)
fun(*args)
def __call__(self, iterarg, *funs):
if len(funs) == 0:
return self.generatorDecorator(iterarg)
else:
self.registerEvents(iterarg, *funs)
def generator_decorator(self, iterarg):
"""
This next calling pattern is a little bit strange. Use it to decorate
a function definition: @attime(30) def run30(): ...
"""
class Calendardeco:
iter_ref = iterarg
calendar_ref = self
fun = None
def __init__(selfdeco, functer):
if functer.__doc__ is None:
functer.__doc__ = 'No doc fun'
if len(functer.__doc__) > 0:
selfdeco.__doc__ = functer.__doc__
selfdeco.__doc__ += ' (will be run at time ' + str(selfdeco.iterRef) + ')'
selfdeco.fun = functer
selfdeco.calendarRef.registerEvents(selfdeco.iterRef, functer, functer.__doc__)
def __call__(selfdeco, *args):
selfdeco.fun(*args)
return calendarDeco
def fast_forward(self, t):
for i in range(t + 1):
self.run(i)
attime = calendar()
sigset = lambda s, v: s.__class__.value.__set__(s, v)
refset = lambda mt, v: mt.__class__.ref.__set__(mt, v) |
def flatten(lst):
if lst:
car,*cdr=lst
if isinstance(car,(list,tuple)):
if cdr: return flatten(car) + flatten(cdr)
return flatten(car)
if cdr: return [car] + flatten(cdr)
return [car]
| def flatten(lst):
if lst:
(car, *cdr) = lst
if isinstance(car, (list, tuple)):
if cdr:
return flatten(car) + flatten(cdr)
return flatten(car)
if cdr:
return [car] + flatten(cdr)
return [car] |
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
res = []
nums.sort()
n = len(nums)
for i in range(n-2):
if i > 0 and nums[i] == nums[i-1]:
continue
j, k = i + 1, n - 1
while j < k:
_sum = nums[i] + nums[j] + nums[k]
if _sum == 0:
res.append([nums[i], nums[j], nums[k]])
j += 1
k -= 1
while j < k and nums[j] == nums[j-1]:
j += 1
while j < k and nums[k] == nums[k+1]:
k -= 1
elif _sum < 0:
j += 1
else:
k -= 1
return res | class Solution:
def three_sum(self, nums: List[int]) -> List[List[int]]:
res = []
nums.sort()
n = len(nums)
for i in range(n - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
(j, k) = (i + 1, n - 1)
while j < k:
_sum = nums[i] + nums[j] + nums[k]
if _sum == 0:
res.append([nums[i], nums[j], nums[k]])
j += 1
k -= 1
while j < k and nums[j] == nums[j - 1]:
j += 1
while j < k and nums[k] == nums[k + 1]:
k -= 1
elif _sum < 0:
j += 1
else:
k -= 1
return res |
def min_rooms_required(intervals):
'''Returns the minimum amount of rooms necessary.
Input consists of a list of time-intervals (start, stop):
>>> min_rooms_required([(10, 20), (15, 25)])
2
>>> min_rooms_required([(10, 20), (20, 30)])
1
'''
# transform intervals
# [(10, 20), (15, 25)]
# to
# [(10, 1), (15, 1), (20, -1), (25, -1)]
times = []
for interval in intervals:
start, stop = interval
assert start < stop
times.append((start, 1))
times.append((stop, -1))
times.sort()
rooms_required = 0
current_rooms_taken = 0
for _, status in times:
current_rooms_taken += status
rooms_required = max(current_rooms_taken, rooms_required)
return rooms_required
| def min_rooms_required(intervals):
"""Returns the minimum amount of rooms necessary.
Input consists of a list of time-intervals (start, stop):
>>> min_rooms_required([(10, 20), (15, 25)])
2
>>> min_rooms_required([(10, 20), (20, 30)])
1
"""
times = []
for interval in intervals:
(start, stop) = interval
assert start < stop
times.append((start, 1))
times.append((stop, -1))
times.sort()
rooms_required = 0
current_rooms_taken = 0
for (_, status) in times:
current_rooms_taken += status
rooms_required = max(current_rooms_taken, rooms_required)
return rooms_required |
class Solution:
def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort(reverse=True,key=lambda x:(x[1],-x[0]))
# print(intervals)
slen = len(intervals)
count = slen
for i in range(slen-1):
# print(intervals[i],intervals[i+1])
if intervals[i][0]<=intervals[i+1][0] and intervals[i][1]>=intervals[i+1][1]:
# print("wh")
intervals[i+1] = intervals[i]
count-=1
return count
| class Solution:
def remove_covered_intervals(self, intervals: List[List[int]]) -> int:
intervals.sort(reverse=True, key=lambda x: (x[1], -x[0]))
slen = len(intervals)
count = slen
for i in range(slen - 1):
if intervals[i][0] <= intervals[i + 1][0] and intervals[i][1] >= intervals[i + 1][1]:
intervals[i + 1] = intervals[i]
count -= 1
return count |
# 312. Burst Balloons
# Runtime: 8448 ms, faster than 44.62% of Python3 online submissions for Burst Balloons.
# Memory Usage: 19.9 MB, less than 38.33% of Python3 online submissions for Burst Balloons.
class Solution:
def maxCoins(self, nums: list[int]) -> int:
n = len(nums)
# Edge case
nums = [1] + nums + [1]
scores = [[0 for _ in range(len(nums))] for _ in range(len(nums))]
for i in range(n, -1, -1):
for j in range(i + 1, n + 2):
# The last burst.
for k in range(i + 1, j):
scores[i][j] = max(scores[i][j], scores[i][k] + scores[k][j] + nums[i] * nums[k] * nums[j])
return scores[0][n + 1] | class Solution:
def max_coins(self, nums: list[int]) -> int:
n = len(nums)
nums = [1] + nums + [1]
scores = [[0 for _ in range(len(nums))] for _ in range(len(nums))]
for i in range(n, -1, -1):
for j in range(i + 1, n + 2):
for k in range(i + 1, j):
scores[i][j] = max(scores[i][j], scores[i][k] + scores[k][j] + nums[i] * nums[k] * nums[j])
return scores[0][n + 1] |
## Implementation of a recursive fibonacci series. Extremely inefficient though.
## Author: AJ
class fibonacci:
def __init__(self):
self.number = 0
self.series = []
def fib_series(self, num):
if num <=2:
if 1 not in self.series:
self.series.append(1)
#self.series = list(set(self.series))
return 1
next = self.fib_series(num-1) + self.fib_series(num-2)
if next not in self.series:
self.series.append(next)
#self.series = list(set(self.series))
return next
def print_fib_series(self):
for ele in self.series:
print(ele, end=' ')
fib = fibonacci()
num = int(input())
fib.fib_series(num)
fib.series.insert(0, 0)
fib.series.insert(1, 1)
fib.print_fib_series()
| class Fibonacci:
def __init__(self):
self.number = 0
self.series = []
def fib_series(self, num):
if num <= 2:
if 1 not in self.series:
self.series.append(1)
return 1
next = self.fib_series(num - 1) + self.fib_series(num - 2)
if next not in self.series:
self.series.append(next)
return next
def print_fib_series(self):
for ele in self.series:
print(ele, end=' ')
fib = fibonacci()
num = int(input())
fib.fib_series(num)
fib.series.insert(0, 0)
fib.series.insert(1, 1)
fib.print_fib_series() |
#inherits , extend , override
class Employee:
def __init__(self , name , age , salary):
self.name = name
self.age = age
self.salary = salary
def work(self):
print( f"{self.name} is working..." )
class SoftwareEngineer(Employee):
def __init__(self , name , age , salary , level):
# super().__init__(name , age , salary)
super(SoftwareEngineer , self).__init__(name , age , salary)
self.level = level
def debug(self):
print( f"{self.name} is debugging..." )
def work(self):
print( f"{self.name} is coding..." )
class Designer(Employee):
def work(self):
print( f"{self.name} is designing..." )
def draw(self):
print( f"{self.name} is drawing..." )
# se = SoftwareEngineer('Hussein' , 22 , 5000 , 'Junior')
# print( se.name , se.age)
# print(se.level)
# se.work()
# se.debug()
#
# d = Designer('Lisa' , 22 , 4000)
# print( d.name , d.age)
# d.work()
# d.draw()
#polymorphism
employees = [
SoftwareEngineer('Sara' , 22 , 4445 , 'Junior'),
SoftwareEngineer('Moataz' , 22 , 9000 , 'Senior'),
Designer('Asmaa' , 20 , 9000)
]
def motivate_employees(employees):
for employee in employees:
employee.work()
motivate_employees(employees)
#Recap
#inheritance: ChildClass(BaseClass)
#inherits , extends , overrides
#super().__init__() or super(class , self).__init__()
#polymorphism
| class Employee:
def __init__(self, name, age, salary):
self.name = name
self.age = age
self.salary = salary
def work(self):
print(f'{self.name} is working...')
class Softwareengineer(Employee):
def __init__(self, name, age, salary, level):
super(SoftwareEngineer, self).__init__(name, age, salary)
self.level = level
def debug(self):
print(f'{self.name} is debugging...')
def work(self):
print(f'{self.name} is coding...')
class Designer(Employee):
def work(self):
print(f'{self.name} is designing...')
def draw(self):
print(f'{self.name} is drawing...')
employees = [software_engineer('Sara', 22, 4445, 'Junior'), software_engineer('Moataz', 22, 9000, 'Senior'), designer('Asmaa', 20, 9000)]
def motivate_employees(employees):
for employee in employees:
employee.work()
motivate_employees(employees) |
MODULE_CONTEXT = {'metadata':{'module':'ANUVAAD-NMT-MODELS'}}
def init():
global app_context
app_context = {
'application_context' : None
} | module_context = {'metadata': {'module': 'ANUVAAD-NMT-MODELS'}}
def init():
global app_context
app_context = {'application_context': None} |
class TestStackiBoxInfo:
def test_no_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info")
assert result.status == "SUCCESS"
assert result.data["changed"] == False
assert len(result.data["boxes"]) == 2
def test_with_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info", name="default")
assert result.status == "SUCCESS"
assert result.data["changed"] == False
assert len(result.data["boxes"]) == 1
assert result.data["boxes"][0]["name"] == "default"
def test_bad_name(self, run_ansible_module):
result = run_ansible_module("stacki_box_info", name="foo")
assert result.status == "FAILED!"
assert result.data["changed"] == False
assert "error" in result.data["msg"]
assert "not a valid box" in result.data["msg"]
| class Teststackiboxinfo:
def test_no_name(self, run_ansible_module):
result = run_ansible_module('stacki_box_info')
assert result.status == 'SUCCESS'
assert result.data['changed'] == False
assert len(result.data['boxes']) == 2
def test_with_name(self, run_ansible_module):
result = run_ansible_module('stacki_box_info', name='default')
assert result.status == 'SUCCESS'
assert result.data['changed'] == False
assert len(result.data['boxes']) == 1
assert result.data['boxes'][0]['name'] == 'default'
def test_bad_name(self, run_ansible_module):
result = run_ansible_module('stacki_box_info', name='foo')
assert result.status == 'FAILED!'
assert result.data['changed'] == False
assert 'error' in result.data['msg']
assert 'not a valid box' in result.data['msg'] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Indian - Purchase Report(GST)',
'version': '1.0',
'description': """GST Purchase Report""",
'category': 'Accounting',
'depends': [
'l10n_in',
'purchase',
],
'data': [
'views/report_purchase_order.xml',
],
'installable': True,
'application': False,
'auto_install': True,
}
| {'name': 'Indian - Purchase Report(GST)', 'version': '1.0', 'description': 'GST Purchase Report', 'category': 'Accounting', 'depends': ['l10n_in', 'purchase'], 'data': ['views/report_purchase_order.xml'], 'installable': True, 'application': False, 'auto_install': True} |
# Dictionaries
# https://www.freecodecamp.org/learn/scientific-computing-with-python/python-for-everybody/python-dictionaries
# A collection of values each with their own key identifiers - like a java hashmap
ddd = dict()
ddd['age'] = 21
ddd['course'] = 182
print(ddd)
ddd['age'] = 27
print(ddd)
# Literals
jjj = {'name': 'bob', 'age': 21}
print(jjj)
# Counting occurrences
ccc = dict()
ccc['count1'] = 1
ccc['count2'] = 1
print(ccc)
ccc['count1'] = ccc['count1'] + 1
print(ccc)
# Counting names ex.
counts = dict()
names = ['bob', 'bob6', 'bob4', 'bob2', 'bob', 'bob6']
for name in names:
if name not in counts:
counts[name] = 1
else:
counts[name] += 1
print(counts)
# The get method
if name in counts:
x = counts[name]
else:
x = 0
# get returns the value for a specified key in the dictionary
x = counts.get(name, 0) # 0 is the default value
# Counting w/ get
counts = dict()
for name in names:
counts[name] = counts.get(name, 0) + 1
print(counts)
| ddd = dict()
ddd['age'] = 21
ddd['course'] = 182
print(ddd)
ddd['age'] = 27
print(ddd)
jjj = {'name': 'bob', 'age': 21}
print(jjj)
ccc = dict()
ccc['count1'] = 1
ccc['count2'] = 1
print(ccc)
ccc['count1'] = ccc['count1'] + 1
print(ccc)
counts = dict()
names = ['bob', 'bob6', 'bob4', 'bob2', 'bob', 'bob6']
for name in names:
if name not in counts:
counts[name] = 1
else:
counts[name] += 1
print(counts)
if name in counts:
x = counts[name]
else:
x = 0
x = counts.get(name, 0)
counts = dict()
for name in names:
counts[name] = counts.get(name, 0) + 1
print(counts) |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wait_for_process
version_added: '2.7'
short_description: Waits for a process to exist or not exist before continuing.
description:
- Waiting for a process to start or stop.
- This is useful when Windows services behave poorly and do not enumerate external dependencies in their manifest.
options:
process_name_exact:
description:
- The name of the process(es) for which to wait.
type: str
process_name_pattern:
description:
- RegEx pattern matching desired process(es).
type: str
sleep:
description:
- Number of seconds to sleep between checks.
- Only applies when waiting for a process to start. Waiting for a process to start
does not have a native non-polling mechanism. Waiting for a stop uses native PowerShell
and does not require polling.
type: int
default: 1
process_min_count:
description:
- Minimum number of process matching the supplied pattern to satisfy C(present) condition.
- Only applies to C(present).
type: int
default: 1
pid:
description:
- The PID of the process.
type: int
owner:
description:
- The owner of the process.
- Requires PowerShell version 4.0 or newer.
type: str
pre_wait_delay:
description:
- Seconds to wait before checking processes.
type: int
default: 0
post_wait_delay:
description:
- Seconds to wait after checking for processes.
type: int
default: 0
state:
description:
- When checking for a running process C(present) will block execution
until the process exists, or until the timeout has been reached.
C(absent) will block execution untile the processs no longer exists,
or until the timeout has been reached.
- When waiting for C(present), the module will return changed only if
the process was not present on the initial check but became present on
subsequent checks.
- If, while waiting for C(absent), new processes matching the supplied
pattern are started, these new processes will not be included in the
action.
type: str
default: present
choices: [ absent, present ]
timeout:
description:
- The maximum number of seconds to wait for a for a process to start or stop
before erroring out.
type: int
default: 300
author:
- Charles Crossan (@crossan007)
'''
EXAMPLES = r'''
- name: Wait 300 seconds for all Oracle VirtualBox processes to stop. (VBoxHeadless, VirtualBox, VBoxSVC)
win_wait_for_process:
process_name: 'v(irtual)?box(headless|svc)?'
state: absent
timeout: 500
- name: Wait 300 seconds for 3 instances of cmd to start, waiting 5 seconds between each check
win_wait_for_process:
process_name_exact: cmd
state: present
timeout: 500
sleep: 5
process_min_count: 3
'''
RETURN = r'''
elapsed:
description: The elapsed seconds between the start of poll and the end of the module.
returned: always
type: float
sample: 3.14159265
matched_processes:
description: List of matched processes (either stopped or started)
returned: always
type: complex
contains:
name:
description: The name of the matched process
returned: always
type: str
sample: svchost
owner:
description: The owner of the matched process
returned: when supported by PowerShell
type: str
sample: NT AUTHORITY\SYSTEM
pid:
description: The PID of the matched process
returned: always
type: int
sample: 7908
'''
| ansible_metadata = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
documentation = "\n---\nmodule: win_wait_for_process\nversion_added: '2.7'\nshort_description: Waits for a process to exist or not exist before continuing.\ndescription:\n- Waiting for a process to start or stop.\n- This is useful when Windows services behave poorly and do not enumerate external dependencies in their manifest.\noptions:\n process_name_exact:\n description:\n - The name of the process(es) for which to wait.\n type: str\n process_name_pattern:\n description:\n - RegEx pattern matching desired process(es).\n type: str\n sleep:\n description:\n - Number of seconds to sleep between checks.\n - Only applies when waiting for a process to start. Waiting for a process to start\n does not have a native non-polling mechanism. Waiting for a stop uses native PowerShell\n and does not require polling.\n type: int\n default: 1\n process_min_count:\n description:\n - Minimum number of process matching the supplied pattern to satisfy C(present) condition.\n - Only applies to C(present).\n type: int\n default: 1\n pid:\n description:\n - The PID of the process.\n type: int\n owner:\n description:\n - The owner of the process.\n - Requires PowerShell version 4.0 or newer.\n type: str\n pre_wait_delay:\n description:\n - Seconds to wait before checking processes.\n type: int\n default: 0\n post_wait_delay:\n description:\n - Seconds to wait after checking for processes.\n type: int\n default: 0\n state:\n description:\n - When checking for a running process C(present) will block execution\n until the process exists, or until the timeout has been reached.\n C(absent) will block execution untile the processs no longer exists,\n or until the timeout has been reached.\n - When waiting for C(present), the module will return changed only if\n the process was not present on the initial check but became present on\n subsequent checks.\n - If, while waiting for C(absent), new processes matching the supplied\n pattern are started, these new processes will not be included in 
the\n action.\n type: str\n default: present\n choices: [ absent, present ]\n timeout:\n description:\n - The maximum number of seconds to wait for a for a process to start or stop\n before erroring out.\n type: int\n default: 300\nauthor:\n- Charles Crossan (@crossan007)\n"
examples = "\n- name: Wait 300 seconds for all Oracle VirtualBox processes to stop. (VBoxHeadless, VirtualBox, VBoxSVC)\n win_wait_for_process:\n process_name: 'v(irtual)?box(headless|svc)?'\n state: absent\n timeout: 500\n\n- name: Wait 300 seconds for 3 instances of cmd to start, waiting 5 seconds between each check\n win_wait_for_process:\n process_name_exact: cmd\n state: present\n timeout: 500\n sleep: 5\n process_min_count: 3\n"
return = '\nelapsed:\n description: The elapsed seconds between the start of poll and the end of the module.\n returned: always\n type: float\n sample: 3.14159265\nmatched_processes:\n description: List of matched processes (either stopped or started)\n returned: always\n type: complex\n contains:\n name:\n description: The name of the matched process\n returned: always\n type: str\n sample: svchost\n owner:\n description: The owner of the matched process\n returned: when supported by PowerShell\n type: str\n sample: NT AUTHORITY\\SYSTEM\n pid:\n description: The PID of the matched process\n returned: always\n type: int\n sample: 7908\n' |
file = 'fer2018surprise.csv'
file_path = '../data/original/'+file
new_file_path = '../data/converted/'+file
num = 0
with open(file_path, 'r') as fp:
with open(new_file_path, 'w') as fp2:
num = num + 1
line = fp.readline()
while line:
lineWithCommas = line.replace(' ', ',')
fp2.write(lineWithCommas)
line = fp.readline()
num = num+1
if num % 100 == 0:
print("Working on line", num)
print("DONE!") | file = 'fer2018surprise.csv'
file_path = '../data/original/' + file
new_file_path = '../data/converted/' + file
num = 0
with open(file_path, 'r') as fp:
with open(new_file_path, 'w') as fp2:
num = num + 1
line = fp.readline()
while line:
line_with_commas = line.replace(' ', ',')
fp2.write(lineWithCommas)
line = fp.readline()
num = num + 1
if num % 100 == 0:
print('Working on line', num)
print('DONE!') |
__author__ = 'BeyondSky'
class Solution:
def climbStairs_dp(self, n):
if n < 2:
return 1
steps = []
steps.append(1)
steps.append(2)
for i in range(2,n):
steps.append(steps[i-1] + steps[i-2])
return steps[n-1]
def main():
outer = Solution();
print(outer.climbStairs_dp(4))
if __name__ == "__main__":
main() | __author__ = 'BeyondSky'
class Solution:
def climb_stairs_dp(self, n):
if n < 2:
return 1
steps = []
steps.append(1)
steps.append(2)
for i in range(2, n):
steps.append(steps[i - 1] + steps[i - 2])
return steps[n - 1]
def main():
    """Demo entry point: print the number of ways to climb 4 stairs."""
    # fix: was `solution()` (NameError, class is Solution) and
    # `climbStairs_dp` (this module's class defines climb_stairs_dp).
    outer = Solution()
    print(outer.climb_stairs_dp(4))
if __name__ == '__main__':
main() |
# Maximum Erasure Value
'''
You are given an array of positive integers nums and want to erase a subarray containing unique elements. The score you get by erasing the subarray is equal to the sum of its elements.
Return the maximum score you can get by erasing exactly one subarray.
An array b is called to be a subarray of a if it forms a contiguous subsequence of a, that is, if it is equal to a[l],a[l+1],...,a[r] for some (l,r).
Example 1:
Input: nums = [4,2,4,5,6]
Output: 17
Explanation: The optimal subarray here is [2,4,5,6].
Example 2:
Input: nums = [5,2,1,2,5,2,1,2,5]
Output: 8
Explanation: The optimal subarray here is [5,2,1] or [1,2,5].
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 104
Hide Hint #1
The main point here is for the subarray to contain unique elements for each index. Only the first subarrays starting from that index have unique elements.
Hide Hint #2
This can be solved using the two pointers technique
'''
class Solution:
    def maximumUniqueSubarray(self, nums: List[int]) -> int:
        """Maximum sum of a contiguous subarray whose elements are all unique.

        Fixes the original, whose unconditional `return 1` made the real
        computation unreachable; also removes the debug prints and replaces
        the O(n^2) slice scans with an O(n) sliding window.
        """
        best = 0
        window_sum = 0
        left = 0
        seen = set()
        for value in nums:
            # Shrink from the left until `value` is no longer duplicated.
            while value in seen:
                seen.remove(nums[left])
                window_sum -= nums[left]
                left += 1
            seen.add(value)
            window_sum += value
            best = max(best, window_sum)
        return best
| """
You are given an array of positive integers nums and want to erase a subarray containing unique elements. The score you get by erasing the subarray is equal to the sum of its elements.
Return the maximum score you can get by erasing exactly one subarray.
An array b is called to be a subarray of a if it forms a contiguous subsequence of a, that is, if it is equal to a[l],a[l+1],...,a[r] for some (l,r).
Example 1:
Input: nums = [4,2,4,5,6]
Output: 17
Explanation: The optimal subarray here is [2,4,5,6].
Example 2:
Input: nums = [5,2,1,2,5,2,1,2,5]
Output: 8
Explanation: The optimal subarray here is [5,2,1] or [1,2,5].
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 104
Hide Hint #1
The main point here is for the subarray to contain unique elements for each index. Only the first subarrays starting from that index have unique elements.
Hide Hint #2
This can be solved using the two pointers technique
"""
class Solution:
    def maximum_unique_subarray(self, nums: List[int]) -> int:
        """Maximum sum of a contiguous subarray with pairwise-distinct elements.

        Fixes the stray `return 1` that made the whole computation
        unreachable, drops the debug prints, and uses an O(n) sliding
        window instead of O(n^2) slice membership tests.
        """
        best = 0
        window_sum = 0
        left = 0
        seen = set()
        for value in nums:
            # Evict from the left edge until `value` can join the window.
            while value in seen:
                seen.remove(nums[left])
                window_sum -= nums[left]
                left += 1
            seen.add(value)
            window_sum += value
            best = max(best, window_sum)
        return best
# class Queue(object):
# def __init__(self):
# """
# initialize your data structure here.
# """
# self.stack1 = []
# self.stack2 = []
#
#
# def push(self, x):
# """
# :type x: int
# :rtype: nothing
# """
# while len(self.stack1) > 0:
# curr = self.stack1.pop()
# self.stack2.append(curr)
# self.stack1.append(x)
# while len(self.stack2) > 0:
# curr = self.stack2.pop()
# self.stack1.append(curr)
#
# def pop(self):
# """
# :rtype: nothing
# """
# self.stack1.pop()
#
#
# def peek(self):
# """
# :rtype: int
# """
# return self.stack1[-1]
#
# def empty(self):
# """
# :rtype: bool
# """
# return len(self.stack1) == 0
class Queue(object):
    """FIFO queue over Python lists; main_stack keeps items in arrival order."""

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.main_stack = []
        self.temp_stack = []

    def push(self, x):
        """
        :type x: int
        :rtype: nothing
        """
        self.main_stack.append(x)

    def pop(self):
        """
        :rtype: nothing
        """
        # The oldest element sits at index 0 of the arrival-ordered list.
        front = self.main_stack[0]
        del self.main_stack[0]
        return front

    def peek(self):
        """
        :rtype: int
        """
        return self.main_stack[0]

    def empty(self):
        """
        :rtype: bool
        """
        return len(self.main_stack) == 0
| class Queue(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.main_stack = []
self.temp_stack = []
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.main_stack.append(x)
def pop(self):
"""
:rtype: nothing
"""
while self.main_stack:
self.temp_stack.append(self.main_stack.pop())
ret = self.temp_stack.pop()
while self.temp_stack:
self.main_stack.append(self.temp_stack.pop())
return ret
def peek(self):
"""
:rtype: int
"""
while self.main_stack:
self.temp_stack.append(self.main_stack.pop())
ret = self.temp_stack[-1]
while self.temp_stack:
self.main_stack.append(self.temp_stack.pop())
return ret
def empty(self):
"""
:rtype: bool
"""
return len(self.main_stack) == 0 |
# --------------------------------------------------------------
class ModelSimilarity:
    '''
    Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
    '''
    def __init__( self, model ):
        self.model = model

    def similarity( self, ranking_i, ranking_j ):
        """Mean pairwise model similarity over all term pairs; 0.0 when no pair scored."""
        sim = 0.0
        pairs = 0
        for term_i in ranking_i:
            for term_j in ranking_j:
                try:
                    sim += self.model.similarity(term_i, term_j)
                    pairs += 1
                except Exception:
                    # fix: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt; keep skipping pairs the
                    # model cannot score (e.g. out-of-vocabulary terms).
                    pass
        if pairs == 0:
            return 0.0
        return sim/pairs
# --------------------------------------------------------------
class WithinTopicMeasure:
    '''
    Measures within-topic coherence for a topic model, based on a set of term rankings.
    '''
    def __init__( self, metric ):
        self.metric = metric

    def evaluate_ranking( self, term_ranking ):
        # A topic's coherence is the metric's similarity of the ranking
        # with itself.
        return self.metric.similarity( term_ranking, term_ranking )

    def evaluate_rankings( self, term_rankings ):
        # Mean per-topic coherence over every ranking.
        per_topic = [self.evaluate_ranking(ranking) for ranking in term_rankings]
        return sum(per_topic) / len(term_rankings)
| class Modelsimilarity:
"""
Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
"""
def __init__(self, model):
self.model = model
def similarity(self, ranking_i, ranking_j):
sim = 0.0
pairs = 0
for term_i in ranking_i:
for term_j in ranking_j:
try:
sim += self.model.similarity(term_i, term_j)
pairs += 1
except:
pass
if pairs == 0:
return 0.0
return sim / pairs
class Withintopicmeasure:
    """
    Measures within-topic coherence for a topic model, based on a set of term rankings.
    """

    def __init__(self, metric):
        self.metric = metric

    def evaluate_ranking(self, term_ranking):
        # Self-similarity of the ranking under the supplied metric.
        return self.metric.similarity(term_ranking, term_ranking)

    def evaluate_rankings(self, term_rankings):
        # Average of the per-topic coherence scores.
        totals = [self.evaluate_ranking(r) for r in term_rankings]
        return sum(totals) / len(term_rankings)
# Extended slicing written one assignment per line for readability
# (ys is defined elsewhere in this module).
xs1 = ys[42:5:-1]  # indices 42 down to 6, stepping backwards
xs2 = ys[:2:3]     # from the start up to (not incl.) index 2, every 3rd item
xs3 = ys[::3]      # every 3rd item of the whole sequence
| xs1 = ys[42:5:-1]
xs2 = ys[:2:3]
xs3 = ys[::3] |
class Stack:
    """Singly linked stack; topNode references the most recently pushed node."""

    topNode = None  # class-level default so a fresh stack reads as empty

    class Node:
        """One linked-list cell holding a single stacked value."""

        def __init__(self, value):
            self.value = value
            self.nextNode = None

        def __repr__(self):
            return "[{}]".format(self.value)

    def __init__(self, iterable):
        """Push each item of *iterable* in order (last item ends up on top).

        Improvement: iterates directly instead of requiring len(), so any
        iterable (including generators) is accepted.
        """
        for item in iterable:
            self.push(item)

    def __repr__(self):
        """Render the stack top-to-bottom, or [EmptyStack] when empty."""
        if self.topNode is None:
            return "[EmptyStack]"
        parts = []
        node = self.topNode
        while node is not None:
            parts.append(str(node))
            node = node.nextNode
        return "\n |\n".join(parts)

    def peek(self):
        """Return the top value without removing it (AttributeError if empty)."""
        return self.topNode.value

    def push(self, value):
        """Place *value* on top of the stack."""
        new_node = self.Node(value)
        new_node.nextNode = self.topNode
        self.topNode = new_node

    def pop(self):
        """Remove the top node and return its value.

        fix: the original silently discarded the popped value; returning it
        is backward compatible (callers ignoring the result are unaffected).
        """
        node = self.topNode
        self.topNode = node.nextNode
        return node.value

    def is_empty(self):
        """True when the stack holds no nodes."""
        return self.topNode is None
if __name__ == '__main__':
    # Quick smoke test: push/pop and show the remaining stack.
    demo_stack = Stack([3, 5])
    demo_stack.push(6)
    demo_stack.pop()
    print(demo_stack)
    demo_stack.pop()
    demo_stack.pop()
    print(demo_stack, demo_stack.is_empty())
| class Stack:
top_node = None
class Node:
def __init__(self, value):
self.value = value
self.nextNode = None
def __repr__(self):
return '[{}]'.format(self.value)
def __init__(self, iterable):
if len(iterable) != 0:
for k in iterable:
new_node = self.Node(k)
new_node.nextNode = self.topNode
self.topNode = new_node
def __repr__(self):
lines = []
working_node = self.topNode
if working_node is None:
return '[EmptyStack]'
while working_node is not None:
lines.append(str(working_node))
working_node = working_node.nextNode
return '\n |\n'.join(lines)
def peek(self):
return self.topNode.value
def push(self, value):
new_node = self.Node(value)
new_node.nextNode = self.topNode
self.topNode = new_node
def pop(self):
self.topNode = self.topNode.nextNode
def is_empty(self):
if self.topNode is None:
return True
return False
if __name__ == '__main__':
    # fix: `stack([3, 5])` referred to the not-yet-bound lowercase local and
    # raised NameError; the class is Stack.
    stack = Stack([3, 5])
    stack.push(6)
    stack.pop()
    print(stack)
    stack.pop()
    stack.pop()
    print(stack, stack.is_empty())
# Multicollinearity solution using VIF 26/6/19
def calculate_vif_(X, thresh=5.0):
    """
    Linear variational inflation factor for multi-colinearity solutions
    Removes colinear features with messages
    X is patients by features DataFrame
    Good idea to inspect the VIFs for all data and consider threshold changes?
    Returns remaining X without the omitted features and list of VIFs
    """
    # Work on column indices so drops are cheap; the DataFrame is untouched.
    features = list(range(X.shape[1]))
    dropped = True
    while dropped:
        dropped = False
        # Recompute a VIF for every remaining feature column.
        # NOTE(review): assumes `variance_inflation_factor` (statsmodels) is
        # imported elsewhere in this module -- confirm.
        vif = [variance_inflation_factor(X.iloc[:, features].values, ix)
               for ix in range(X.iloc[:, features].shape[1])]
        maxloc = vif.index(max(vif))
        # Greedy: drop only the single worst feature, then re-evaluate,
        # because removing one column changes every other VIF.
        if max(vif) > thresh:
            print('dropping \'' + X.iloc[:, features].columns[maxloc] +
                  '\' at index: ' + str(maxloc))
            print('VIF = ', vif[maxloc])
            print('\n\n all VIFs = ', vif)
            del features[maxloc]
            dropped = True
    print('Remaining features:')
    print(X.columns[features])
return X.iloc[:, features], vif | def calculate_vif_(X, thresh=5.0):
"""
Linear variational inflation factor for multi-colinearity solutions
Removes colinear features with messages
X is patients by features DataFrame
Good idea to inspect the VIFs for all data and consider threshold changes?
Returns remaining X without the omitted features and list of VIFs
"""
features = list(range(X.shape[1]))
dropped = True
while dropped:
dropped = False
vif = [variance_inflation_factor(X.iloc[:, features].values, ix) for ix in range(X.iloc[:, features].shape[1])]
maxloc = vif.index(max(vif))
if max(vif) > thresh:
print("dropping '" + X.iloc[:, features].columns[maxloc] + "' at index: " + str(maxloc))
print('VIF = ', vif[maxloc])
print('\n\n all VIFs = ', vif)
del features[maxloc]
dropped = True
print('Remaining features:')
print(X.columns[features])
return (X.iloc[:, features], vif) |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = 'Testing ATS active timeout'
Test.SkipUnless(
    Condition.HasCurlFeature('http2')
)
# ATS under test plus an origin that delays 8 s, so the 2 s active timeout
# configured below always fires first.
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", delay=8)
request_header = {"headers": "GET /file HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "5678", "body": ""}
server.addResponse("sessionfile.log", request_header, response_header)
# TLS material for the https test runs below.
ts.addSSLfile("../tls/ssl/server.pem")
ts.addSSLfile("../tls/ssl/server.key")
ts.Disk.records_config.update({
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.url_remap.remap_required': 1,
    'proxy.config.http.transaction_active_timeout_out': 2,
})
ts.Disk.remap_config.AddLine(
    'map / http://127.0.0.1:{0}/'.format(server.Variables.Port))
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Three runs exercise the same timeout over HTTP/1.1 cleartext, HTTP/1.1 TLS
# and HTTP/2; each must surface the "Activity Timeout" error body.
tr = Test.AddTestRun("tr")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.Command = 'curl -i http://127.0.0.1:{0}/file'.format(ts.Variables.port)
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
tr2 = Test.AddTestRun("tr")
tr2.Processes.Default.Command = 'curl -k -i --http1.1 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr2.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
tr3 = Test.AddTestRun("tr")
tr3.Processes.Default.Command = 'curl -k -i --http2 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr3.Processes.Default.Streams.stdout = Testers.ContainsExpression("Activity Timeout", "Request should fail with active timeout")
| """
"""
Test.Summary = 'Testing ATS active timeout'
Test.SkipUnless(Condition.HasCurlFeature('http2'))
ts = Test.MakeATSProcess('ts', select_ports=True, enable_tls=True)
server = Test.MakeOriginServer('server', delay=8)
request_header = {'headers': 'GET /file HTTP/1.1\r\nHost: *\r\n\r\n', 'timestamp': '5678', 'body': ''}
response_header = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '5678', 'body': ''}
server.addResponse('sessionfile.log', request_header, response_header)
ts.addSSLfile('../tls/ssl/server.pem')
ts.addSSLfile('../tls/ssl/server.key')
ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), 'proxy.config.url_remap.remap_required': 1, 'proxy.config.http.transaction_active_timeout_out': 2})
ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port))
ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
tr = Test.AddTestRun('tr')
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.Command = 'curl -i http://127.0.0.1:{0}/file'.format(ts.Variables.port)
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout')
tr2 = Test.AddTestRun('tr')
tr2.Processes.Default.Command = 'curl -k -i --http1.1 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr2.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout')
tr3 = Test.AddTestRun('tr')
tr3.Processes.Default.Command = 'curl -k -i --http2 https://127.0.0.1:{0}/file'.format(ts.Variables.ssl_port)
tr3.Processes.Default.Streams.stdout = Testers.ContainsExpression('Activity Timeout', 'Request should fail with active timeout') |
class take_skip:
    """Single-pass iterator over `count` multiples of `step`, starting at 0."""

    def __init__(self, step, count):
        self.step = step
        self.count = count
        self.start = 0
        self.end = step * count

    def __iter__(self):
        # The object is its own iterator (one pass only).
        return self

    def __next__(self):
        if self.start >= self.end:
            raise StopIteration
        current = self.start
        self.start += self.step
        return current
# Print the five multiples 0, 10, 20, 30, 40.
numbers = take_skip(10, 5)
for number in numbers:
print(number) | class Take_Skip:
def __init__(self, step, count):
self.step = step
self.count = count
self.start = 0
self.end = step * count
def __iter__(self):
return self
def __next__(self):
index = self.start
if index >= self.end:
raise StopIteration
self.start += self.step
return index
# fix: the class in this module is Take_Skip; `take_skip` is undefined here
# and raised NameError.
numbers = Take_Skip(10, 5)
for number in numbers:
    print(number)
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of all strings in strs ('' if empty)."""
        if len(strs) == 0:
            return ''
        def getCommonPrefix(s1, s2):
            # Collect the matching leading characters of s1 and s2.
            result = []
            for i in range(min(len(s1), len(s2))):
                if s1[i] == s2[i]:
                    result.append(s1[i])
                else:
                    break
            return ''.join(result)
        # Fold the pairwise common prefix across the whole list.
        commonPrefix = strs[0]
        for i in range(1, len(strs)):
            commonPrefix = getCommonPrefix(commonPrefix, strs[i])
return commonPrefix | class Solution:
def longest_common_prefix(self, strs: List[str]) -> str:
if len(strs) == 0:
return ''
def get_common_prefix(s1, s2):
result = []
for i in range(min(len(s1), len(s2))):
if s1[i] == s2[i]:
result.append(s1[i])
else:
break
return ''.join(result)
common_prefix = strs[0]
for i in range(1, len(strs)):
common_prefix = get_common_prefix(commonPrefix, strs[i])
return commonPrefix |
n = int(input())  # element count (the value itself is not used below)
a = list(map(int, input().split()))
# Running XOR of every element; later, XOR of all-but-a[i] is xor ^ a[i].
xor = a[0]
for x in a[1:]:
    xor ^= x
ans = print(*[xor ^ x for x in a]) | n = int(input())
a = list(map(int, input().split()))
# XOR of every element; XOR of all-but-a[i] is then xor ^ a[i].
xor = a[0]
for x in a[1:]:
    xor ^= x
# fix: `ans = print(...)` bound ans to None (print returns None); the
# binding was pointless, so just print the per-element results.
print(*[xor ^ x for x in a])
class DictTrafo(object):
    """Maps a nested input dict onto a new dict according to a trafo spec.

    Each trafo_dict value is either a nested spec dict, a (callable,
    rel_path) pair, or a relative key path (str/tuple/list) into the input.
    """

    def __init__(self, trafo_dict=None, prefix=None):
        if trafo_dict is None:
            trafo_dict = {}
        self.trafo_dict = trafo_dict
        # Normalise the prefix to a tuple of keys (or None for no prefix).
        if type(prefix) is str:
            self.prefix = (prefix,)
        elif type(prefix) is tuple:
            self.prefix = prefix
        else:
            self.prefix = None

    def transform(self, in_dict, trafo_dict=None, keep_none=False):
        """Resolve every trafo_dict entry against in_dict; drop None results unless keep_none."""
        if trafo_dict is None:
            trafo_dict = self.trafo_dict
        res = {}
        for key in trafo_dict:
            val = trafo_dict[key]
            tval = type(val)
            # sub dict
            if tval is dict:
                vres = self.transform(in_dict, val)
            # (callable, rel_path)
            elif tval is tuple and len(val) == 2 and callable(val[0]):
                rel_path = self.read_rel_path(val[1], in_dict)
                vres = val[0](key, rel_path)
            # a rel_path in in_dict
            elif tval in (str, tuple, list):
                vres = self.read_rel_path(val, in_dict)
            # invalid
            else:
                # fix: `"...%s" + val` concatenated instead of formatting,
                # raising TypeError for non-str values and never
                # substituting %s; use %-formatting.
                raise ValueError("invalid type in trafo_dict: %s" % (val,))
            if vres is not None or keep_none:
                res[key] = vres
        return res

    def read_rel_path(self, path, in_dict):
        """Prepend the instance prefix to *path* and read it from in_dict."""
        abs_path = []
        if self.prefix:
            abs_path += self.prefix
        if type(path) is str:
            abs_path.append(path)
        else:
            abs_path += path
        return self.read_path(abs_path, in_dict)

    def read_path(self, path, in_dict):
        """Walk *path* (list of keys) into nested dicts; None when it dead-ends."""
        if len(path) == 0:
            return in_dict
        if type(in_dict) is not dict:
            return None
        key = path[0]
        if key in in_dict:
            val = in_dict[key]
            path = path[1:]
            return self.read_path(path, val)
        # Key missing: explicit None (previously an implicit fall-through).
        return None
| class Dicttrafo(object):
def __init__(self, trafo_dict=None, prefix=None):
if trafo_dict is None:
trafo_dict = {}
self.trafo_dict = trafo_dict
if type(prefix) is str:
self.prefix = (prefix,)
elif type(prefix) is tuple:
self.prefix = prefix
else:
self.prefix = None
def transform(self, in_dict, trafo_dict=None, keep_none=False):
if trafo_dict is None:
trafo_dict = self.trafo_dict
res = {}
for key in trafo_dict:
val = trafo_dict[key]
tval = type(val)
if tval is dict:
vres = self.transform(in_dict, val)
elif tval is tuple and len(val) == 2 and callable(val[0]):
rel_path = self.read_rel_path(val[1], in_dict)
vres = val[0](key, rel_path)
elif tval in (str, tuple, list):
vres = self.read_rel_path(val, in_dict)
else:
raise value_error('invalid type in trafo_dict: %s' + val)
if vres is not None or keep_none:
res[key] = vres
return res
def read_rel_path(self, path, in_dict):
abs_path = []
if self.prefix:
abs_path += self.prefix
if type(path) is str:
abs_path.append(path)
else:
abs_path += path
return self.read_path(abs_path, in_dict)
def read_path(self, path, in_dict):
if len(path) == 0:
return in_dict
if type(in_dict) is not dict:
return None
key = path[0]
if key in in_dict:
val = in_dict[key]
path = path[1:]
return self.read_path(path, val) |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Python port of legacy_unit_info.html. This is a mapping of various units
# reported by gtest perf tests to histogram units, including improvement
# direction and conversion factors.
# Note that some of the converted names don't match up exactly with the ones
# in the HTML file, as unit names are sometimes different between the two
# implementations. For example, timeDurationInMs in the JavaScript
# implementation is ms in the Python implementation.
IMPROVEMENT_DIRECTION_DONT_CARE = ''
IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER = '_smallerIsBetter'
IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER = '_biggerIsBetter'
class LegacyUnit(object):
    """Simple object for storing data to improve readability."""

    def __init__(self, name, improvement_direction, conversion_factor=1):
        allowed = (IMPROVEMENT_DIRECTION_DONT_CARE,
                   IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
                   IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER)
        # Direction must be one of the three module-level suffixes.
        assert improvement_direction in allowed
        self._name = name
        self._improvement_direction = improvement_direction
        self._conversion_factor = conversion_factor

    @property
    def name(self):
        # Histogram unit name with the improvement-direction suffix appended.
        return self._name + self._improvement_direction

    @property
    def conversion_factor(self):
        return self._conversion_factor

    def AsTuple(self):
        return self.name, self.conversion_factor
LEGACY_UNIT_INFO = {
'%': LegacyUnit('n%', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_DONT_CARE),
'Celsius': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'Hz': LegacyUnit('Hz', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'KB': LegacyUnit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=1024),
'MB': LegacyUnit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=(1024 * 1024)),
'ObjectsAt30FPS': LegacyUnit('unitless',
IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'available_kB': LegacyUnit('sizeInBytes',
IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER,
conversion_factor=1024),
'bit/s': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'bytes': LegacyUnit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'chars/s': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'commit_count': LegacyUnit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'count': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'coverage%': LegacyUnit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'dB': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'files': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'fps': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'frame_count': LegacyUnit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'frame_time': LegacyUnit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'frames': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'garbage_collections': LegacyUnit('count',
IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'idle%': LegacyUnit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'janks': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'lines': LegacyUnit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'mWh': LegacyUnit('J', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=3.6),
'milliseconds': LegacyUnit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'milliseconds-per-frame': LegacyUnit(
'ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'minutes': LegacyUnit('msBestFitFormat',
IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=60000),
'mips': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'mpixels_sec': LegacyUnit('unitless',
IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'ms': LegacyUnit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'mtri_sec': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'mvtx_sec': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'objects (bigger is better)': LegacyUnit(
'count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'packets': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'percent': LegacyUnit('n%', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'points': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'ports': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'reduction%': LegacyUnit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'relocs': LegacyUnit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'runs/s': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'score (bigger is better)': LegacyUnit(
'unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'seconds': LegacyUnit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=1000),
'tokens/s': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER),
'tasks': LegacyUnit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER),
'us': LegacyUnit('msBestFitFormat', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=0.001),
'ns': LegacyUnit('msBestFitFormat', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER,
conversion_factor=0.000001),
}
# Add duplicate units here
LEGACY_UNIT_INFO['frames-per-second'] = LEGACY_UNIT_INFO['fps']
LEGACY_UNIT_INFO['kb'] = LEGACY_UNIT_INFO['KB']
LEGACY_UNIT_INFO['ms'] = LEGACY_UNIT_INFO['milliseconds']
LEGACY_UNIT_INFO['runs_per_s'] = LEGACY_UNIT_INFO['runs/s']
LEGACY_UNIT_INFO['runs_per_second'] = LEGACY_UNIT_INFO['runs/s']
LEGACY_UNIT_INFO['score'] = LEGACY_UNIT_INFO['score (bigger is better)']
LEGACY_UNIT_INFO['score_(bigger_is_better)'] = LEGACY_UNIT_INFO['score']
| improvement_direction_dont_care = ''
improvement_direction_smaller_is_better = '_smallerIsBetter'
improvement_direction_bigger_is_better = '_biggerIsBetter'
class Legacyunit(object):
"""Simple object for storing data to improve readability."""
def __init__(self, name, improvement_direction, conversion_factor=1):
assert improvement_direction in [IMPROVEMENT_DIRECTION_DONT_CARE, IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER]
self._name = name
self._improvement_direction = improvement_direction
self._conversion_factor = conversion_factor
@property
def name(self):
return self._name + self._improvement_direction
@property
def conversion_factor(self):
return self._conversion_factor
def as_tuple(self):
return (self.name, self.conversion_factor)
legacy_unit_info = {'%': legacy_unit('n%', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), '': legacy_unit('unitless', IMPROVEMENT_DIRECTION_DONT_CARE), 'Celsius': legacy_unit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'Hz': legacy_unit('Hz', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'KB': legacy_unit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=1024), 'MB': legacy_unit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=1024 * 1024), 'ObjectsAt30FPS': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'available_kB': legacy_unit('sizeInBytes', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER, conversion_factor=1024), 'bit/s': legacy_unit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'bytes': legacy_unit('sizeInBytes', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'chars/s': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'commit_count': legacy_unit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'count': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'coverage%': legacy_unit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'dB': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'files': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'fps': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'frame_count': legacy_unit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'frame_time': legacy_unit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'frames': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'garbage_collections': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'idle%': legacy_unit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'janks': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'lines': legacy_unit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'mWh': legacy_unit('J', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=3.6), 'milliseconds': legacy_unit('ms', 
IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'milliseconds-per-frame': legacy_unit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'minutes': legacy_unit('msBestFitFormat', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=60000), 'mips': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'mpixels_sec': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'ms': legacy_unit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'mtri_sec': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'mvtx_sec': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'objects (bigger is better)': legacy_unit('count', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'packets': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'percent': legacy_unit('n%', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'points': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'ports': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'reduction%': legacy_unit('n%', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'relocs': legacy_unit('count', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'runs/s': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'score (bigger is better)': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'seconds': legacy_unit('ms', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=1000), 'tokens/s': legacy_unit('unitless', IMPROVEMENT_DIRECTION_BIGGER_IS_BETTER), 'tasks': legacy_unit('unitless', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER), 'us': legacy_unit('msBestFitFormat', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=0.001), 'ns': legacy_unit('msBestFitFormat', IMPROVEMENT_DIRECTION_SMALLER_IS_BETTER, conversion_factor=1e-06)}
LEGACY_UNIT_INFO['frames-per-second'] = LEGACY_UNIT_INFO['fps']
LEGACY_UNIT_INFO['kb'] = LEGACY_UNIT_INFO['KB']
LEGACY_UNIT_INFO['ms'] = LEGACY_UNIT_INFO['milliseconds']
LEGACY_UNIT_INFO['runs_per_s'] = LEGACY_UNIT_INFO['runs/s']
LEGACY_UNIT_INFO['runs_per_second'] = LEGACY_UNIT_INFO['runs/s']
LEGACY_UNIT_INFO['score'] = LEGACY_UNIT_INFO['score (bigger is better)']
LEGACY_UNIT_INFO['score_(bigger_is_better)'] = LEGACY_UNIT_INFO['score'] |
"""
This module demonstrates OVERLOADING the + symbol:
-- With numbers as operands, it means addition (as in arithmetic)
-- With sequences as operands, it means concatenation, that is,
forming a new sequence that stitches together its operands.
This module also demonstrates the STR function.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, and their colleagues.
"""
# -----------------------------------------------------------------------------
# Students: Read and run this program. There is nothing else
# for you to do in here. Just use it as an example.
# Before you leave this example,
# *** MAKE SURE YOU UNDERSTAND: ***
# *** -- What it means to use + for CONCATENATION ***
# *** -- What the str function does. ***
# -----------------------------------------------------------------------------
def main():
    """ Demonstrates OVERLOADING the + symbol. """
    banner = '-----------------------------------------------------------'
    print()
    print(banner)
    print('Addition, then various forms of concatenation:')
    print(banner)
    # + adds numbers, but concatenates lists, tuples and strings.
    print(5 + 33)
    print([4, 3] + [1, 7, 2, 4])
    print((4, 1, 7) + (444,) + (3, 3))
    print('hello' + 'Dave' + '55' + '83')
    print(5 + 33, '5' + '33')
    x, y, z = 51, 3, 40
    print()
    print(banner)
    print('With and (using string concatenation) without spaces:')
    print(banner)
    # print with several arguments separates them with spaces.
    print(x, y, z)
    # Build [0, 2, 4, 6, 8] via repeated list concatenation.
    x = []
    for k in range(5):
        x = x + [2 * k]
print(x) | """
This module demonstrates OVERLOADING the + symbol:
-- With numbers as operands, it means addition (as in arithmetic)
-- With sequences as operands, it means concatenation, that is,
forming a new sequence that stitches together its operands.
This module also demonstrates the STR function.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, and their colleagues.
"""
def main():
""" Demonstrates OVERLOADING the + symbol. """
print()
print('-----------------------------------------------------------')
print('Addition, then various forms of concatenation:')
print('-----------------------------------------------------------')
print(5 + 33)
print([4, 3] + [1, 7, 2, 4])
print((4, 1, 7) + (444,) + (3, 3))
print('hello' + 'Dave' + '55' + '83')
print(5 + 33, '5' + '33')
x = 51
y = 3
z = 40
print()
print('-----------------------------------------------------------')
print('With and (using string concatenation) without spaces:')
print('-----------------------------------------------------------')
print(x, y, z)
x = []
for k in range(5):
x = x + [2 * k]
print(x) |
filename = 'full_text_small.txt'
def file_write(filename):
with open(filename, 'r') as f:
n = 0
for line in f:
n += 1
if n <= 5:
print(line)
return(line)
file_write(filename)
| filename = 'full_text_small.txt'
def file_write(filename):
with open(filename, 'r') as f:
n = 0
for line in f:
n += 1
if n <= 5:
print(line)
return line
file_write(filename) |
a = 1
b = 0
c = a & b
d = a | b
e = a ^ b
print(c+d+e)
my_list = [[1,2,3,4] for i in range(2)]
print(my_list[1][0])
x =2
x = x==x
print(x)
my_list = [1,2,3]
for v in range(len(my_list)):
my_list.insert(1, my_list[v])
print(my_list) | a = 1
b = 0
c = a & b
d = a | b
e = a ^ b
print(c + d + e)
my_list = [[1, 2, 3, 4] for i in range(2)]
print(my_list[1][0])
x = 2
x = x == x
print(x)
my_list = [1, 2, 3]
for v in range(len(my_list)):
my_list.insert(1, my_list[v])
print(my_list) |
n = int(input())
ans = 0
for i in range(n):
l, c = map(int, input().split())
if l > c:
ans += c
else:
continue
print(ans) | n = int(input())
ans = 0
for i in range(n):
(l, c) = map(int, input().split())
if l > c:
ans += c
else:
continue
print(ans) |
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_moco.py',
'../_base_/datasets/vocdataset_voc0712.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
optimizer = dict(type='SGD', lr=0.02/16, momentum=0.9, weight_decay=0.0001) | _base_ = ['../_base_/models/faster_rcnn_r50_fpn_moco.py', '../_base_/datasets/vocdataset_voc0712.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
optimizer = dict(type='SGD', lr=0.02 / 16, momentum=0.9, weight_decay=0.0001) |
_base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']
model = dict(
decode_head=dict(
mode='seq',
))
data = dict(samples_per_gpu=10)
| _base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']
model = dict(decode_head=dict(mode='seq'))
data = dict(samples_per_gpu=10) |
def merge_sort(arr):
n = len(arr)
if (n >= 2):
A = merge_sort(arr[:int(n/2)])
B = merge_sort(arr[int(n/2):])
i = 0
j = 0
for k in range(0, n):
if i < int(n/2) and (j == len(B) or A[i] <= B[j]):
arr[k] = A[i]
i = i + 1
else:
arr[k] = B[j]
j = j + 1
return arr
arr = input()
arr = [(int(num)) for num in arr.split()]
print(merge_sort(arr)) | def merge_sort(arr):
n = len(arr)
if n >= 2:
a = merge_sort(arr[:int(n / 2)])
b = merge_sort(arr[int(n / 2):])
i = 0
j = 0
for k in range(0, n):
if i < int(n / 2) and (j == len(B) or A[i] <= B[j]):
arr[k] = A[i]
i = i + 1
else:
arr[k] = B[j]
j = j + 1
return arr
arr = input()
arr = [int(num) for num in arr.split()]
print(merge_sort(arr)) |
# --------------
# Code starts here
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {
'Math' : 65,
'English' : 70,
'History' : 80,
'French' : 70,
'Science' : 60
}
print(courses.get('Math'))
print(courses.get('English'))
print(courses.get('History'))
print(courses.get('French'))
print(courses.get('Science'))
total = (courses.get('Math')+courses.get('English')+courses.get('History')+courses.get('French')+courses.get('Science'))
print(total)
percentage = (total/500)*100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {
'Geoffery Hinton' : 78,
'Andrew Ng' : 95,
'Sebastian Raschka' :65,
'Yoshua Benjio' : 50,
'Hilary Mason' : 70,
'Corinna Cortes' : 66,
'Peter Warden' : 75
}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
first_name = topper.split()[0]
last_name = topper.split()[1]
full_name = last_name+' '+first_name
certificate_name = full_name.upper()
print(certificate_name)
# Code starts here
# Code ends here
| class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']
new_class = class_1 + class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
print(courses.get('Math'))
print(courses.get('English'))
print(courses.get('History'))
print(courses.get('French'))
print(courses.get('Science'))
total = courses.get('Math') + courses.get('English') + courses.get('History') + courses.get('French') + courses.get('Science')
print(total)
percentage = total / 500 * 100
print(percentage)
mathematics = {'Geoffery Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65, 'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66, 'Peter Warden': 75}
topper = max(mathematics, key=mathematics.get)
print(topper)
topper = 'andrew ng'
first_name = topper.split()[0]
last_name = topper.split()[1]
full_name = last_name + ' ' + first_name
certificate_name = full_name.upper()
print(certificate_name) |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( x , y , z ) :
if ( not ( y / x ) ) :
return y if ( not ( y / z ) ) else z
return x if ( not ( x / z ) ) else z
#TOFILL
if __name__ == '__main__':
param = [
(48,63,56,),
(11,55,84,),
(50,89,96,),
(21,71,74,),
(94,39,42,),
(22,44,86,),
(3,41,68,),
(67,62,94,),
(59,2,83,),
(50,11,1,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | def f_gold(x, y, z):
if not y / x:
return y if not y / z else z
return x if not x / z else z
if __name__ == '__main__':
param = [(48, 63, 56), (11, 55, 84), (50, 89, 96), (21, 71, 74), (94, 39, 42), (22, 44, 86), (3, 41, 68), (67, 62, 94), (59, 2, 83), (50, 11, 1)]
n_success = 0
for (i, parameters_set) in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print('#Results: %i, %i' % (n_success, len(param))) |
config = {
'lr': (1.5395901937079718e-05, 4.252664987376195e-05, 9.011700881717918e-05, 0.00026653695086486183),
'target_stepsize': 0.07688144983085089,
'feedback_wd': 5.751527315358352e-07,
'beta1': 0.9,
'beta2': 0.999,
'epsilon': (7.952762675272583e-06, 3.573159556208438e-06, 1.0425400798717413e-08, 2.0232644009531115e-08),
'lr_fb': 4.142073343374983e-05,
'sigma': 0.18197929046014408,
'beta1_fb': 0.9,
'beta2_fb': 0.999,
'epsilon_fb': 8.070760899188774e-06,
'out_dir': 'logs/cifar/DDTPConvCIFAR',
'network_type': 'DDTPConvCIFAR',
'initialization': 'xavier_normal',
'fb_activation': 'linear',
'dataset': 'cifar10',
# ### Training options ###
'optimizer': 'Adam',
'optimizer_fb': 'Adam',
'momentum': 0.,
'parallel': True,
'normalize_lr': True,
'batch_size': 128,
'epochs_fb': 10,
'not_randomized': True,
'not_randomized_fb': True,
'extra_fb_minibatches': 0,
'extra_fb_epochs': 1,
'epochs': 300,
'double_precision': True,
'no_val_set': True,
'forward_wd': 0.,
### Network options ###
# 'num_hidden': 3,
# 'size_hidden': 1024,
# 'size_input': 3072,
# 'size_output': 10,
'hidden_activation': 'tanh',
'output_activation': 'softmax',
'no_bias': False,
### Miscellaneous options ###
'no_cuda': False,
'random_seed': 42,
'cuda_deterministic': False,
'freeze_BPlayers': False,
'multiple_hpsearch': False,
### Logging options ###
'save_logs': False,
'save_BP_angle': False,
'save_GN_angle': False,
'save_GN_activations_angle': False,
'save_BP_activations_angle': False,
'gn_damping': 0.
} | config = {'lr': (1.5395901937079718e-05, 4.252664987376195e-05, 9.011700881717918e-05, 0.00026653695086486183), 'target_stepsize': 0.07688144983085089, 'feedback_wd': 5.751527315358352e-07, 'beta1': 0.9, 'beta2': 0.999, 'epsilon': (7.952762675272583e-06, 3.573159556208438e-06, 1.0425400798717413e-08, 2.0232644009531115e-08), 'lr_fb': 4.142073343374983e-05, 'sigma': 0.18197929046014408, 'beta1_fb': 0.9, 'beta2_fb': 0.999, 'epsilon_fb': 8.070760899188774e-06, 'out_dir': 'logs/cifar/DDTPConvCIFAR', 'network_type': 'DDTPConvCIFAR', 'initialization': 'xavier_normal', 'fb_activation': 'linear', 'dataset': 'cifar10', 'optimizer': 'Adam', 'optimizer_fb': 'Adam', 'momentum': 0.0, 'parallel': True, 'normalize_lr': True, 'batch_size': 128, 'epochs_fb': 10, 'not_randomized': True, 'not_randomized_fb': True, 'extra_fb_minibatches': 0, 'extra_fb_epochs': 1, 'epochs': 300, 'double_precision': True, 'no_val_set': True, 'forward_wd': 0.0, 'hidden_activation': 'tanh', 'output_activation': 'softmax', 'no_bias': False, 'no_cuda': False, 'random_seed': 42, 'cuda_deterministic': False, 'freeze_BPlayers': False, 'multiple_hpsearch': False, 'save_logs': False, 'save_BP_angle': False, 'save_GN_angle': False, 'save_GN_activations_angle': False, 'save_BP_activations_angle': False, 'gn_damping': 0.0} |
# constants related to the matchers
# all the types of matches
MATCH_TYPE_NONE = 0
MATCH_TYPE_RESET = 1
MATCH_TYPE_NMI = 2
MATCH_TYPE_WAIT_START = 3
MATCH_TYPE_WAIT_END = 4
MATCH_TYPE_BITS = 6 # number of bits required to represent the above (max 8)
NUM_MATCHERS = 32 # how many match engines are there?
MATCHER_BITS = 5 # number of bits required to represent the above (max 8)
| match_type_none = 0
match_type_reset = 1
match_type_nmi = 2
match_type_wait_start = 3
match_type_wait_end = 4
match_type_bits = 6
num_matchers = 32
matcher_bits = 5 |
def get_initial(name, force_uppercase=True):
if force_uppercase:
initial = name[0:1].upper()
else:
initial = name[0:1].lower()
return initial
first_name = input('Enter your first name: ')
# initial = get_initial(first_name)
initial = get_initial(force_uppercase=False, name=first_name)
print('Your initial is: ' + initial) | def get_initial(name, force_uppercase=True):
if force_uppercase:
initial = name[0:1].upper()
else:
initial = name[0:1].lower()
return initial
first_name = input('Enter your first name: ')
initial = get_initial(force_uppercase=False, name=first_name)
print('Your initial is: ' + initial) |
INPUT_PATH = "./input.txt"
input_file = open(INPUT_PATH, "r")
lines = input_file.readlines()
input_file.close()
divided_input = [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in lines]
# Part 1
print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in divided_input]))
# Cursed part 1 1-liner assuming INPUT_PATH is defined with path to the input file
#print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in open(INPUT_PATH, "r").readlines()]]))
# Part 2
total_sum = 0
for elem in divided_input:
left = elem[0]
right = elem[1]
numbers = {
"0": None,
"1": [x for x in left if len(x) == 2][0],
"2": None,
"3": None,
"4": [x for x in left if len(x) == 4][0],
"5": None,
"6": None,
"7": [x for x in left if len(x) == 3][0],
"8": [x for x in left if len(x) == 7][0],
"9": None
}
segments = {
"a": None,
"b": None,
"c": None,
"d": None,
"e": None,
"f": None,
"g": None
}
(segments["a"],) = numbers["7"].difference(numbers["1"])
numbers["6"] = [x for x in left if len(x) == 6 and len(x.intersection(numbers["1"])) == 1][0]
zero_and_nine = [x for x in left if len(x) == 6 and len(x.difference(numbers["6"])) != 0]
numbers["9"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 4][0]
numbers["0"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 3][0]
(segments["f"],) = numbers["6"].intersection(numbers["1"])
(segments["c"],) = numbers["1"].difference(set(segments["f"]))
(segments["e"],) = numbers["6"].difference(numbers["9"])
(segments["d"],) = numbers["8"].difference(numbers["0"])
(segments["b"],) = numbers["4"].difference(set([x for x in segments.values() if x is not None]))
(segments["g"],) = numbers["8"].difference(set([x for x in segments.values() if x is not None]))
numbers["2"] = set([segments["a"], segments["c"], segments["d"], segments["e"], segments["g"]])
numbers["5"] = set([segments["a"], segments["b"], segments["d"], segments["f"], segments["g"]])
numbers["3"] = set([segments["a"], segments["c"], segments["d"], segments["f"], segments["g"]])
value = 0
for digit_set in right:
value *= 10
digit = int([k for k, v in numbers.items() if v == digit_set][0])
value += digit
total_sum += value
print("Part 2: ", total_sum) | input_path = './input.txt'
input_file = open(INPUT_PATH, 'r')
lines = input_file.readlines()
input_file.close()
divided_input = [[[set(x) for x in x.split()] for x in line.split(' | ')] for line in lines]
print('Part 1: ', sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in divided_input]))
total_sum = 0
for elem in divided_input:
left = elem[0]
right = elem[1]
numbers = {'0': None, '1': [x for x in left if len(x) == 2][0], '2': None, '3': None, '4': [x for x in left if len(x) == 4][0], '5': None, '6': None, '7': [x for x in left if len(x) == 3][0], '8': [x for x in left if len(x) == 7][0], '9': None}
segments = {'a': None, 'b': None, 'c': None, 'd': None, 'e': None, 'f': None, 'g': None}
(segments['a'],) = numbers['7'].difference(numbers['1'])
numbers['6'] = [x for x in left if len(x) == 6 and len(x.intersection(numbers['1'])) == 1][0]
zero_and_nine = [x for x in left if len(x) == 6 and len(x.difference(numbers['6'])) != 0]
numbers['9'] = [x for x in zero_and_nine if len(x.intersection(numbers['4'])) == 4][0]
numbers['0'] = [x for x in zero_and_nine if len(x.intersection(numbers['4'])) == 3][0]
(segments['f'],) = numbers['6'].intersection(numbers['1'])
(segments['c'],) = numbers['1'].difference(set(segments['f']))
(segments['e'],) = numbers['6'].difference(numbers['9'])
(segments['d'],) = numbers['8'].difference(numbers['0'])
(segments['b'],) = numbers['4'].difference(set([x for x in segments.values() if x is not None]))
(segments['g'],) = numbers['8'].difference(set([x for x in segments.values() if x is not None]))
numbers['2'] = set([segments['a'], segments['c'], segments['d'], segments['e'], segments['g']])
numbers['5'] = set([segments['a'], segments['b'], segments['d'], segments['f'], segments['g']])
numbers['3'] = set([segments['a'], segments['c'], segments['d'], segments['f'], segments['g']])
value = 0
for digit_set in right:
value *= 10
digit = int([k for (k, v) in numbers.items() if v == digit_set][0])
value += digit
total_sum += value
print('Part 2: ', total_sum) |
#Python Lists
mylist = [ "banana", "abacate", "manga"]
print(mylist) | mylist = ['banana', 'abacate', 'manga']
print(mylist) |
# working on final project to combine all the learnt concepts into 1
# problem statement.
#The CTO wants to monitor all the computer usage by all engineers. Using Python ,
# write an automation script that will produce a report when each user logged in and out,
# and how long each user used the computers.
# writing real script:
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
# first sort all the processes by date using a function:
def get_event_date(event):
return event.date
#get current user and pass the sorted date function
def current_users(events):
events.sort(key=get_event_date)
#create a dictionary to store the values
machines = {}
for event in events:
#check if a mchine exist in dictionary else add
if event.machine not in machines:
machines[event.machine] = set()
if event.type == "logout":
machines[event.machine].add(event.user)
elif event.type == "logout":
machines[event.machine].remove(event.user)
return machines
#create a different function to print the report
def generate_report(machines):
for machines,users in machines.items():
#print only thoses who logged in and not those who loged in and out:
if len(users)>0:
users_list = ", ".join(users)
print("{} : {}".format(machines,users_list))
class Event:
def __init__(self, event_date, event_type, machine_name,user):
self.date = event_date
self.type = event_type
self.machine =machine_name
self.user = user
events = [
Event("2020-05-12 12:50PM","login","mail-server local", "owen"),
Event("2021-04-12 4:50PM","logout","mail-server local", "james"),
Event("2020-05-14 2:50PM","login","workstation local", "shem"),
Event("2020-05-1 16:50PM","login","mail-server local", "Timz"),
Event("2020-06-19 18:50PM","logout","admin server local", "brian"),
Event("2020-02-12 17:50PM","login","mail-server local", "chris")
]
#try creation
users = current_users(events)
print(users)
# generate user report
generate_report(users)
# CONGRATALTIONS:
# Up next final project;
| def get_event_date(event):
return event.date
def get_event_date(event):
return event.date
def get_event_date(event):
return event.date
def current_users(events):
events.sort(key=get_event_date)
machines = {}
for event in events:
if event.machine not in machines:
machines[event.machine] = set()
if event.type == 'logout':
machines[event.machine].add(event.user)
elif event.type == 'logout':
machines[event.machine].remove(event.user)
return machines
def generate_report(machines):
for (machines, users) in machines.items():
if len(users) > 0:
users_list = ', '.join(users)
print('{} : {}'.format(machines, users_list))
class Event:
def __init__(self, event_date, event_type, machine_name, user):
self.date = event_date
self.type = event_type
self.machine = machine_name
self.user = user
events = [event('2020-05-12 12:50PM', 'login', 'mail-server local', 'owen'), event('2021-04-12 4:50PM', 'logout', 'mail-server local', 'james'), event('2020-05-14 2:50PM', 'login', 'workstation local', 'shem'), event('2020-05-1 16:50PM', 'login', 'mail-server local', 'Timz'), event('2020-06-19 18:50PM', 'logout', 'admin server local', 'brian'), event('2020-02-12 17:50PM', 'login', 'mail-server local', 'chris')]
users = current_users(events)
print(users)
generate_report(users) |
class RockartExamplesException(Exception):
pass
class RockartExamplesIndexError(RockartExamplesException, IndexError):
pass
class RockartExamplesValueError(RockartExamplesException, ValueError):
pass
| class Rockartexamplesexception(Exception):
pass
class Rockartexamplesindexerror(RockartExamplesException, IndexError):
pass
class Rockartexamplesvalueerror(RockartExamplesException, ValueError):
pass |
print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n")
row = int(input("Enter the number of rows: "))
for i in range(1, row+1):
for j in range(i):
print("*", end=" ")
print()
for i in range(row+1, 0, -1):
for j in range(i):
print("*", end=" ")
print() | print('Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n')
row = int(input('Enter the number of rows: '))
for i in range(1, row + 1):
for j in range(i):
print('*', end=' ')
print()
for i in range(row + 1, 0, -1):
for j in range(i):
print('*', end=' ')
print() |
#!/usr/bin/env python3
for hour_offset in range(0, 24, 6):
train = open('data/train_b{:02}.csv'.format(hour_offset), 'w', newline='')
test = open('data/test_b{:02}.csv'.format(hour_offset), 'w', newline='')
data = open('data/data.txt')
t = int(next(data))
n, m = tuple(map(int, next(data).split()))
for line_num, line in enumerate(data):
hse = line_num // n # hours since epoch
hod = hse % 24 # hour of day
dse = (hse + hour_offset) // 24 # days since epoch
dow = dse % 7 # day of week
row = line_num % n
for col, dem in enumerate(map(int, line.split())):
out, lim = (test, -1) if dem == -1 else (train, None)
out.write(','.join(map(str, (hse, row, col, hod, dow, dem)[:lim])) + '\n')
data.close()
train.close()
test.close()
| for hour_offset in range(0, 24, 6):
train = open('data/train_b{:02}.csv'.format(hour_offset), 'w', newline='')
test = open('data/test_b{:02}.csv'.format(hour_offset), 'w', newline='')
data = open('data/data.txt')
t = int(next(data))
(n, m) = tuple(map(int, next(data).split()))
for (line_num, line) in enumerate(data):
hse = line_num // n
hod = hse % 24
dse = (hse + hour_offset) // 24
dow = dse % 7
row = line_num % n
for (col, dem) in enumerate(map(int, line.split())):
(out, lim) = (test, -1) if dem == -1 else (train, None)
out.write(','.join(map(str, (hse, row, col, hod, dow, dem)[:lim])) + '\n')
data.close()
train.close()
test.close() |
p = [1,2,3,4,5,6,7,8,9]
del p[1:3]
print(p[:])
p.remove(8)
print(p[:])
print(p.pop())
p.clear()
print(p[:])
l=[1,3,4,5,6,7]
l.remove(3)
print(l[:])
l.sort()
print(l[:])
l.reverse()
print(l[:])
l.clear()
print(l[:])
| p = [1, 2, 3, 4, 5, 6, 7, 8, 9]
del p[1:3]
print(p[:])
p.remove(8)
print(p[:])
print(p.pop())
p.clear()
print(p[:])
l = [1, 3, 4, 5, 6, 7]
l.remove(3)
print(l[:])
l.sort()
print(l[:])
l.reverse()
print(l[:])
l.clear()
print(l[:]) |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylark rules for Swift."""
load(
"@bazel_skylib//lib:collections.bzl",
"collections",
)
load(
"@build_bazel_rules_apple//apple/bundling:apple_bundling_aspect.bzl",
"apple_bundling_aspect",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleResourceInfo",
"AppleResourceSet",
"SwiftInfo",
)
load(
"@build_bazel_rules_apple//apple:utils.bzl",
"xcrun_action",
"XCRUNWRAPPER_LABEL",
"module_cache_path",
"label_scoped_path",
)
load(
"@build_bazel_rules_apple//apple/bundling:xcode_support.bzl",
"xcode_support",
)
load(
"@build_bazel_rules_apple//common:attrs.bzl",
"attrs",
)
load(
"@build_bazel_rules_apple//common:providers.bzl",
"providers",
)
def _parent_dirs(dirs):
"""Returns a set of parent directories for each directory in dirs."""
return depset(direct=[f.rpartition("/")[0] for f in dirs])
def _framework_names(dirs):
"""Returns the framework name for each directory in dir."""
return depset(direct=[f.rpartition("/")[2].partition(".")[0] for f in dirs])
def _swift_target(cpu, platform, sdk_version):
"""Returns a target triplet for Swift compiler."""
platform_string = str(platform.platform_type)
if platform_string not in ["ios", "watchos", "tvos", "macos"]:
fail("Platform '%s' is not supported" % platform_string)
if platform_string == "macos":
platform_string = "macosx"
return "%s-apple-%s%s" % (cpu, platform_string, sdk_version)
def _swift_compilation_mode_flags(config_vars, objc_fragment):
"""Returns additional `swiftc` flags for the current compilation mode.
Args:
config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
that affect compilation of this target.
objc_fragment: The Objective-C configuration fragment.
Returns:
The additional command line flags to pass to `swiftc`.
"""
mode = config_vars["COMPILATION_MODE"]
flags = []
if mode == "dbg" or mode == "fastbuild":
# TODO(dmishe): Find a way to test -serialize-debugging-options
flags += [
"-Onone", "-DDEBUG", "-enable-testing", "-Xfrontend",
"-serialize-debugging-options"
]
elif mode == "opt":
flags += ["-O", "-DNDEBUG"]
if mode == "dbg" or objc_fragment.generate_dsym:
flags.append("-g")
return flags
def _clang_compilation_mode_flags(objc_fragment):
"""Returns additional clang flags for the current compilation mode."""
# In general, every compilation mode flag from native objc_ rules should be
# passed, but -g seems to break Clang module compilation. Since this flag does
# not make much sense for module compilation and only touches headers,
# it's ok to omit.
native_clang_flags = objc_fragment.copts_for_current_compilation_mode
return [x for x in native_clang_flags if x != "-g"]
def _swift_bitcode_flags(apple_fragment):
"""Returns bitcode flags based on selected mode."""
mode = str(apple_fragment.bitcode_mode)
if mode == "embedded":
return ["-embed-bitcode"]
elif mode == "embedded_markers":
return ["-embed-bitcode-marker"]
return []
def _swift_sanitizer_flags(features):
"""Returns sanitizer flags."""
sanitizer_features_to_flags = {
"asan": ["-sanitize=address"],
}
sanitizer_flags = []
for (feature, flags) in sanitizer_features_to_flags.items():
if feature in features:
sanitizer_flags.extend(flags)
return sanitizer_flags
def swift_module_name(label):
"""Returns a module name for the given label."""
prefix = label.package.lstrip("//").replace("/", "_").replace("-", "_")
suffix = label.name.replace("-", "_")
if prefix:
return (prefix + "_" + suffix)
else:
return suffix
def _swift_lib_dir(apple_fragment, config_vars, is_static=False):
"""Returns the location of Swift runtime directory to link against.
Args:
apple_fragment: The Apple configuration fragment.
config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
that affect compilation of this target.
is_static: If True, the static library directory will be used instead of the
dynamic library directory (currently available only on macOS).
Returns:
The location of the Swift runtime directory to link against.
"""
dir_name = "swift_static" if is_static else "swift"
platform_str = apple_fragment.single_arch_platform.name_in_plist.lower()
if "xcode_toolchain_path" in config_vars:
return "{0}/usr/lib/{1}/{2}".format(
config_vars["xcode_toolchain_path"], dir_name, platform_str)
return "{0}/Toolchains/XcodeDefault.xctoolchain/usr/lib/{1}/{2}".format(
apple_common.apple_toolchain().developer_dir(),
dir_name,
platform_str)
def swift_linkopts(apple_fragment, config_vars, is_static=False):
"""Returns additional linker arguments needed to link Swift.
Args:
apple_fragment: The Apple configuration fragment.
config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
that affect compilation of this target.
is_static: If True, the static library directory will be used instead of the
dynamic library directory (currently available only on macOS).
Returns:
Additional linker arguments needed to link Swift.
"""
return ["-L" + _swift_lib_dir(apple_fragment, config_vars, is_static)]
def _swift_parsing_flags(srcs):
"""Returns additional parsing flags for swiftc."""
# swiftc has two different parsing modes: script and library.
# The difference is that in script mode top-level expressions are allowed.
# This mode is triggered when the file compiled is called main.swift.
# Additionally, script mode is used when there's just one file in the
# compilation. we would like to avoid that and therefore force library mode
# when there's only one source and it's not called main.
if len(srcs) == 1 and srcs[0].basename != "main.swift":
return ["-parse-as-library"]
return []
def _is_valid_swift_module_name(string):
"""Returns True if the string is a valid Swift module name."""
if not string:
return False
for char in string:
# Check that the character is in [a-zA-Z0-9_]
if not (char.isalnum() or char == "_"):
return False
return True
def _validate_rule_and_deps(ctx):
"""Validates the target and its dependencies."""
name_error_str = ("Error in target '%s', Swift target and its dependencies' "+
"names can only contain characters in [a-zA-Z0-9_].")
# Validate the name of the target
if not _is_valid_swift_module_name(ctx.label.name):
fail(name_error_str % ctx.label)
# Validate names of the dependencies
for dep in ctx.attr.deps:
if not _is_valid_swift_module_name(dep.label.name):
fail(name_error_str % dep.label)
def _get_wmo_state(copts, swift_fragment):
"""Returns the status of Whole Module Optimization feature.
Args:
copts: The list of copts to search for WMO flags.
swift_fragment: The Swift configuration fragment.
Returns:
A Boolean value indicating whether WMO has been enabled.
"""
all_copts = copts + swift_fragment.copts()
return "-wmo" in all_copts or "-whole-module-optimization" in all_copts
def swift_compile_requirements(
srcs,
deps,
module_name,
label,
copts,
defines,
apple_fragment,
objc_fragment,
swift_fragment,
config_vars,
default_configuration,
xcode_config,
genfiles_dir,
features):
"""Returns a struct that contains the requirements to compile Swift code.
Args:
srcs: The list of `*.swift` sources to compile.
deps: The list of targets that are dependencies for the sources being
compiled.
module_name: The name of the Swift module to which the compiled files
belong.
label: The label used to generate the Swift module name if one was not
provided.
copts: A list of compiler options to pass to `swiftc`. Defaults to an empty
list.
defines: A list of compiler defines to pass to `swiftc`. Defaults to an
empty list.
apple_fragment: The Apple configuration fragment.
objc_fragment: The Objective-C configuration fragment.
swift_fragment: The Swift configuration fragment.
config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
that affect compilation of this target.
default_configuration: The default configuration retrieved from the rule
context.
xcode_config: the XcodeVersionConfig to use
genfiles_dir: The directory where genfiles are written.
features: List of enabled features as passed with --features or set into the
features attribute.
Returns:
A structure that contains the information required to compile Swift code.
"""
return struct(
srcs=srcs,
deps=deps,
module_name=module_name,
label=label,
copts=copts,
defines=defines,
apple_fragment=apple_fragment,
objc_fragment=objc_fragment,
swift_fragment=swift_fragment,
config_vars=config_vars,
default_configuration=default_configuration,
xcode_config=xcode_config,
genfiles_dir=genfiles_dir,
features=features,
)
def swiftc_inputs(ctx):
"""Determine the list of inputs required for the compile action.
Args:
ctx: rule context.
Returns:
A list of files needed by swiftc.
"""
# TODO(allevato): Simultaneously migrate callers off this function and swap it
# out with swiftc_inputs.
return _swiftc_inputs(ctx.files.srcs, ctx.attr.deps)
def _swiftc_inputs(srcs, deps=[]):
"""Determines the list of inputs required for a compile action.
Args:
srcs: A list of `*.swift` source files being compiled.
deps: A list of targetsthat are dependencies of the files being compiled.
Returns:
A list of files that should be passed as inputs to the Swift compilation
action.
"""
swift_providers = providers.find_all(deps, SwiftInfo)
dep_modules = depset(transitive=[
swift.transitive_modules for swift in swift_providers
])
transitive_objc = apple_common.new_objc_provider(
providers=providers.find_all(deps, "objc"))
objc_files = depset(transitive=[
transitive_objc.header,
transitive_objc.module_map,
transitive_objc.umbrella_header,
transitive_objc.static_framework_file,
transitive_objc.dynamic_framework_file,
])
return srcs + dep_modules.to_list() + objc_files.to_list()
def swiftc_args(ctx):
  """Returns an almost complete array of arguments to be passed to swiftc.

  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file. It has no
  side effects and does not modify ctx. It expects ctx to contain the same
  fragments and attributes as swift_library (you're encouraged to depend on
  SWIFT_LIBRARY_ATTRS in your rule definition).

  Args:
    ctx: The rule context.
  Returns:
    A list of command line arguments for swiftc. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  # TODO(allevato): Simultaneously migrate callers off this function and swap it
  # out with swiftc_args.
  requirements = swift_compile_requirements(
      ctx.files.srcs,
      ctx.attr.deps,
      ctx.attr.module_name,
      ctx.label,
      ctx.attr.copts,
      ctx.attr.defines,
      ctx.fragments.apple,
      ctx.fragments.objc,
      ctx.fragments.swift,
      ctx.var,
      ctx.configuration,
      ctx.attr._xcode_config[apple_common.XcodeVersionConfig],
      ctx.genfiles_dir,
      ctx.features)
  return _swiftc_args(requirements)
def _swiftc_args(reqs):
  """Returns an almost complete array of arguments to be passed to swiftc.
  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file.
  Args:
    reqs: The compilation requirements as returned by
      `swift_compile_requirements`.
  Returns:
    A list of command line arguments for `swiftc`. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  apple_fragment = reqs.apple_fragment
  deps = reqs.deps
  cpu = apple_fragment.single_arch_cpu
  platform = apple_fragment.single_arch_platform
  # Minimum deployment OS version for the platform, used in the -target triple.
  target_os = reqs.xcode_config.minimum_os_for_platform_type(
      platform.platform_type)
  target = _swift_target(cpu, platform, target_os)
  apple_toolchain = apple_common.apple_toolchain()
  # Collect transitive dependencies. `+=` is the legacy depset union operator.
  dep_modules = depset()
  swiftc_defines = depset(reqs.defines)
  swift_providers = providers.find_all(deps, SwiftInfo)
  for swift in swift_providers:
    dep_modules += swift.transitive_modules
    swiftc_defines += swift.transitive_defines
  objc_providers = providers.find_all(deps, "objc")
  transitive_objc = apple_common.new_objc_provider(providers=objc_providers)
  # Everything that needs to be included with -I. These need to be pulled from
  # the list of providers because there are currently issues with some required
  # header search paths for ObjC protos not being available to ClangImporter.
  objc_includes = []
  for objc in objc_providers:
    objc_includes += objc.include.to_list()
  objc_includes = depset(objc_includes)
  # Module maps for dependent targets. These should be pulled from the combined
  # provider to ensure that we only get direct deps.
  objc_module_maps = transitive_objc.module_map
  static_frameworks = _framework_names(transitive_objc.framework_dir)
  # A list of paths to pass with -F flag.
  framework_dirs = depset(
      direct=[apple_toolchain.platform_developer_framework_dir(apple_fragment)],
      transitive=[
          _parent_dirs(transitive_objc.framework_dir),
          _parent_dirs(transitive_objc.dynamic_framework_dir),
      ])
  # objc_library#copts is not propagated to its dependencies and so it is not
  # collected here. In theory this may lead to un-importable targets (since
  # their module cannot be compiled by clang), but did not occur in practice.
  objc_defines = transitive_objc.define
  srcs_args = [f.path for f in reqs.srcs]
  # Include each swift module's parent directory for imports to work.
  include_dirs = depset([x.dirname for x in dep_modules])
  # Include the genfiles root so full-path imports can work for generated protos.
  include_dirs += depset([reqs.genfiles_dir.path])
  include_args = ["-I%s" % d for d in include_dirs + objc_includes]
  framework_args = ["-F%s" % x for x in framework_dirs]
  define_args = ["-D%s" % x for x in swiftc_defines.to_list()]
  # Disable the LC_LINKER_OPTION load commands for static frameworks automatic
  # linking. This is needed to correctly deduplicate static frameworks from also
  # being linked into test binaries where it is also linked into the app binary.
  autolink_args = collections.before_each(
      "-Xfrontend",
      collections.before_each("-disable-autolink-framework", static_frameworks))
  clang_args = collections.before_each(
      "-Xcc",
      # Add the current directory to clang's search path.
      # This instance of clang is spawned by swiftc to compile module maps and
      # is not passed the current directory as a search path by default.
      ["-iquote", "."]
      # Pass DEFINE or copt values from objc configuration and rules to clang
      + ["-D" + x for x in objc_defines] + reqs.objc_fragment.copts
      + _clang_compilation_mode_flags(reqs.objc_fragment)
      # Load module maps explicitly instead of letting Clang discover them on
      # search paths. This is needed to avoid a case where Clang may load the
      # same header both in modular and non-modular contexts, leading to
      # duplicate definitions in the same file.
      # https://llvm.org/bugs/show_bug.cgi?id=19501
      + ["-fmodule-map-file=%s" % x.path for x in objc_module_maps])
  args = [
      "-emit-object",
      "-module-name",
      reqs.module_name,
      "-target",
      target,
      "-sdk",
      apple_toolchain.sdk_dir(),
      "-module-cache-path",
      module_cache_path(reqs.genfiles_dir),
  ]
  if reqs.default_configuration.coverage_enabled:
    args.extend(["-profile-generate", "-profile-coverage-mapping"])
  args.extend(_swift_compilation_mode_flags(
      reqs.config_vars, reqs.objc_fragment))
  args.extend(_swift_bitcode_flags(apple_fragment))
  args.extend(_swift_parsing_flags(reqs.srcs))
  args.extend(_swift_sanitizer_flags(reqs.features))
  args.extend(srcs_args)
  args.extend(include_args)
  args.extend(framework_args)
  args.extend(clang_args)
  args.extend(define_args)
  args.extend(autolink_args)
  # Add user flags in the very end, this will let the compiler better sanitize
  # unterminated flag pairs (e.g. -Xcc) and not clash with generated flags.
  args.extend(reqs.swift_fragment.copts())
  args.extend(reqs.copts)
  return args
def _find_swift_version(args):
  """Scans an argument list for the effective `-swift-version` value.

  Args:
    args: The command-line arguments to be scanned.
  Returns:
    The value following the last `-swift-version` flag, or None if the flag
    does not occur in the argument list.
  """
  # The flag may be repeated and the last occurrence wins, so keep
  # overwriting the result while scanning left to right.
  version = None
  for flag, value in zip(args, args[1:]):
    if flag == "-swift-version":
      version = value
  return version
def register_swift_compile_actions(ctx, reqs):
  """Registers actions to compile Swift sources.
  Args:
    ctx: The rule context. Within this function, it should only be used to
      register actions, or declare files; do not use it to access attributes
      because it may be called from many different rules.
    reqs: The compilation requirements as returned by
      `swift_compile_requirements`.
  Returns:
    A tuple containing the (1) output files of the compilation action, the (2)
    `objc` provider, and (3) the `SwiftInfo` provider that should be propagated
    by a target compiling these Swift sources.
  """
  module_name = reqs.module_name
  label = reqs.label
  # Collect transitive dependencies. `+=` is the legacy depset union operator.
  dep_modules = depset()
  dep_libs = depset()
  dep_docs = depset()
  swiftc_defines = depset(reqs.defines)
  swift_providers = providers.find_all(reqs.deps, SwiftInfo)
  for swift in swift_providers:
    dep_libs += swift.transitive_libs
    dep_modules += swift.transitive_modules
    dep_docs += swift.transitive_docs
    swiftc_defines += swift.transitive_defines
  # A unique path for rule's outputs.
  objs_outputs_path = label_scoped_path(reqs.label, "_objs/")
  output_lib = ctx.new_file(objs_outputs_path + module_name + ".a")
  output_module = ctx.new_file(objs_outputs_path + module_name + ".swiftmodule")
  output_doc = ctx.new_file(objs_outputs_path + module_name + ".swiftdoc")
  # These filenames are guaranteed to be unique, no need to scope.
  output_header = ctx.new_file(label.name + "-Swift.h")
  swiftc_output_map_file = ctx.new_file(label.name + ".output_file_map.json")
  swiftc_output_map = struct()  # Maps output types to paths.
  output_objs = []  # Object file outputs, used in archive action.
  swiftc_outputs = []  # Other swiftc outputs that aren't processed further.
  has_wmo = _get_wmo_state(reqs.copts, reqs.swift_fragment)
  # Declare per-source outputs and record them in the output file map.
  for source in reqs.srcs:
    basename = source.basename
    output_map_entry = {}
    # Output an object file
    obj = ctx.new_file(objs_outputs_path + basename + ".o")
    output_objs.append(obj)
    output_map_entry["object"] = obj.path
    # Output a partial module file, unless WMO is enabled in which case only
    # the final, complete module will be generated.
    if not has_wmo:
      partial_module = ctx.new_file(objs_outputs_path + basename +
                                    ".partial_swiftmodule")
      swiftc_outputs.append(partial_module)
      output_map_entry["swiftmodule"] = partial_module.path
    swiftc_output_map += struct(**{source.path: struct(**output_map_entry)})
  # Write down the intermediate outputs map for this compilation, to be used
  # with -output-file-map flag.
  # It's a JSON file that maps each source input (.swift) to its outputs
  # (.o, .bc, .d, ...)
  # Example:
  #   {'foo.swift':
  #       {'object': 'foo.o', 'bitcode': 'foo.bc', 'dependencies': 'foo.d'}}
  # There's currently no documentation on this option, however all of the keys
  # are listed here https://github.com/apple/swift/blob/swift-2.2.1-RELEASE/include/swift/Driver/Types.def
  ctx.file_action(
      output=swiftc_output_map_file,
      content=swiftc_output_map.to_json())
  args = ["swiftc"] + _swiftc_args(reqs)
  # Remember the effective -swift-version so it can be propagated in SwiftInfo.
  swift_version = _find_swift_version(args)
  args += [
      "-I" + output_module.dirname,
      "-emit-module-path",
      output_module.path,
      "-emit-objc-header-path",
      output_header.path,
      "-output-file-map",
      swiftc_output_map_file.path,
  ]
  if has_wmo:
    # WMO has two modes: threaded and not. We want the threaded mode because it
    # will use the output map we generate. This leads to a better debug
    # experience in lldb and Xcode.
    # TODO(b/32571265): 12 has been chosen as the best option for a Mac Pro,
    # we should get an interface in Bazel to get core count.
    args.extend(["-num-threads", "12"])
  # The compile action itself, run through xcrun.
  xcrun_action(
      ctx,
      inputs=_swiftc_inputs(reqs.srcs, reqs.deps) + [swiftc_output_map_file],
      outputs=([output_module, output_header, output_doc] + output_objs +
               swiftc_outputs),
      mnemonic="SwiftCompile",
      arguments=args,
      use_default_shell_env=False,
      progress_message=("Compiling Swift module %s (%d files)" %
                        (reqs.label.name, len(reqs.srcs))))
  # Archive the per-source object files into a single static library.
  xcrun_action(ctx,
               inputs=output_objs,
               outputs=(output_lib,),
               mnemonic="SwiftArchive",
               arguments=[
                   "libtool", "-static", "-o", output_lib.path
               ] + [x.path for x in output_objs],
               progress_message=(
                   "Archiving Swift objects %s" % reqs.label.name))
  # This tells the linker to write a reference to .swiftmodule as an AST symbol
  # in the final binary.
  # With dSYM enabled, this results in a __DWARF,__swift_ast section added to
  # the dSYM binary, from where LLDB is able deserialize module information.
  # Without dSYM, LLDB will follow the AST references, however there is a bug
  # where it follows only the first one https://bugs.swift.org/browse/SR-2637
  # This means that dSYM is required for debugging until that is resolved.
  extra_linker_args = ["-Xlinker -add_ast_path -Xlinker " + output_module.path]
  # The full transitive set of libraries and modules used by this target.
  transitive_libs = depset([output_lib]) + dep_libs
  transitive_modules = depset([output_module]) + dep_modules
  transitive_docs = depset([output_doc]) + dep_docs
  compile_outputs = [output_lib, output_module, output_header, output_doc]
  objc_providers = providers.find_all(reqs.deps, "objc")
  objc_provider_args = {
      "library": depset([output_lib]) + dep_libs,
      "header": depset([output_header]),
      "providers": objc_providers,
      "link_inputs": depset([output_module]),
      "uses_swift": True,
  }
  # Re-propagate direct Objective-C module maps to dependents, because those
  # Swift modules still need to see them. We need to construct a new transitive
  # objc provider to get the correct strict propagation behavior.
  transitive_objc = apple_common.new_objc_provider(providers=objc_providers)
  objc_provider_args["module_map"] = transitive_objc.module_map
  # TODO(b/63674406): For macOS, don't propagate the runtime linker path flags,
  # because we need to be able to choose the static version of the
  # library instead. Clean this up once the native bundling rules are deleted.
  platform_type = ctx.fragments.apple.single_arch_platform.platform_type
  if platform_type != apple_common.platform_type.macos:
    objc_provider_args["linkopt"] = depset(
        swift_linkopts(reqs.apple_fragment, reqs.config_vars) +
        extra_linker_args, order="topological")
  objc_provider = apple_common.new_objc_provider(**objc_provider_args)
  return compile_outputs, objc_provider, SwiftInfo(
      direct_lib=output_lib,
      direct_module=output_module,
      direct_doc=output_doc,
      swift_version=swift_version,
      transitive_libs=transitive_libs,
      transitive_modules=transitive_modules,
      transitive_defines=swiftc_defines,
      transitive_docs=transitive_docs,
  )
def merge_swift_info_providers(targets):
  """Merges the transitive Swift info of the given targets into a new provider.
  This function should be used when it is necessary to merge SwiftInfo providers
  outside of a compile action (which does it automatically).
  Args:
    targets: A sequence of targets that may propagate SwiftInfo providers. Those
      that do not are ignored.
  Returns:
    A new SwiftInfo provider that contains the transitive information from all
    the targets.
  """
  transitive_defines = depset()
  transitive_libs = depset()
  transitive_modules = depset()
  transitive_docs = depset()
  # `+=` is the legacy depset union operator; each iteration unions one
  # provider's transitive sets into the accumulators.
  for swift_info in providers.find_all(targets, SwiftInfo):
    transitive_defines += swift_info.transitive_defines
    transitive_libs += swift_info.transitive_libs
    transitive_modules += swift_info.transitive_modules
    transitive_docs += swift_info.transitive_docs
  # The direct_* fields are None because the merged provider does not itself
  # correspond to a compiled module.
  return SwiftInfo(
      direct_lib=None,
      direct_module=None,
      direct_doc=None,
      swift_version=None,
      transitive_defines=transitive_defines,
      transitive_libs=transitive_libs,
      transitive_modules=transitive_modules,
      transitive_docs=transitive_docs,
  )
def merge_swift_objc_providers(targets):
  """Merges the transitive objc info of the given targets into a new provider.
  This is restricted to the keys of the objc provider that are used by Swift
  compile actions to propagate information about Swift compiled libraries back
  up to linker actions and so forth.
  This function should be used when it is necessary to merge objc providers
  created by other Swift libraries outside of a compile action (which does it
  automatically).
  Args:
    targets: A sequence of targets that may propagate objc providers. Those that
      do not are ignored.
  Returns:
    A new objc provider that contains the transitive information from all the
    targets.
  """
  libraries = depset()
  headers = depset()
  link_inputs = depset()
  linkopts = depset()
  # `+=` is the legacy depset union operator.
  for objc in providers.find_all(targets, "objc"):
    libraries += objc.library
    headers += objc.header
    link_inputs += objc.link_inputs
    linkopts += objc.linkopt
  # Only keys with non-empty values are passed to the new provider.
  objc_provider_args = {"uses_swift": True}
  if headers:
    objc_provider_args["header"] = headers
  if libraries:
    objc_provider_args["library"] = libraries
  if linkopts:
    objc_provider_args["linkopt"] = linkopts
  if link_inputs:
    objc_provider_args["link_inputs"] = link_inputs
  return apple_common.new_objc_provider(**objc_provider_args)
def _collect_resource_sets(resources, structured_resources, deps, module_name):
  """Gathers resource sets from a target and its dependencies.

  Args:
    resources: The resources associated with the target being built.
    structured_resources: The structured resources associated with the target
      being built.
    deps: The dependencies of the target being built.
    module_name: The name of the Swift module associated with the resources
      (either the user-provided name, or the auto-generated one).
  Returns:
    A list of structs representing the transitive resources to propagate to the
    bundling rules.
  """
  collected = []
  # Resources attached directly to this target form their own set, tagged
  # with the module name.
  if resources or structured_resources:
    collected.append(AppleResourceSet(
        resources=depset(resources),
        structured_resources=depset(structured_resources),
        swift_module=module_name,
    ))
  # Fold in the resource sets propagated by dependencies.
  for dep in deps:
    if AppleResourceInfo in dep:
      collected.extend(dep[AppleResourceInfo].resource_sets)
  return collected
def _swift_library_impl(ctx):
  """Implementation for swift_library Skylark rule."""
  _validate_rule_and_deps(ctx)
  # Use the explicit module_name attribute when given, otherwise derive one
  # from the label.
  resolved_module_name = ctx.attr.module_name or swift_module_name(ctx.label)
  reqs = swift_compile_requirements(
      ctx.files.srcs,
      ctx.attr.deps,
      resolved_module_name,
      ctx.label,
      ctx.attr.copts,
      ctx.attr.defines,
      ctx.fragments.apple,
      ctx.fragments.objc,
      ctx.fragments.swift,
      ctx.var,
      ctx.configuration,
      ctx.attr._xcode_config[apple_common.XcodeVersionConfig],
      ctx.genfiles_dir,
      ctx.features)
  compile_outputs, objc_provider, swift_info = register_swift_compile_actions(
      ctx, reqs)
  resource_sets = _collect_resource_sets(
      ctx.files.resources, ctx.files.structured_resources, ctx.attr.deps,
      resolved_module_name)
  # The `swift` struct mirrors the SwiftInfo fields as a legacy struct
  # provider, alongside the modern SwiftInfo in `providers`.
  return struct(
      files=depset(compile_outputs),
      swift=struct(
          direct_lib=swift_info.direct_lib,
          direct_module=swift_info.direct_module,
          direct_doc=swift_info.direct_doc,
          swift_version=swift_info.swift_version,
          transitive_libs=swift_info.transitive_libs,
          transitive_modules=swift_info.transitive_modules,
          transitive_docs=swift_info.transitive_docs,
          transitive_defines=swift_info.transitive_defines,
      ),
      objc=objc_provider,
      providers=[
          AppleResourceInfo(resource_sets=resource_sets),
          swift_info,
      ])
# Attributes for swift_library; also intended to be mixed into other rules
# that call swiftc_args/swiftc_inputs on their context (see swiftc_args).
SWIFT_LIBRARY_ATTRS = {
    "srcs": attr.label_list(allow_files = [".swift"], allow_empty=False),
    "deps": attr.label_list(
        # TODO(b/37902442): Figure out why this is required here; it seems like
        # having it on the binary should be sufficient because the aspect goes
        # down all deps, but without this, the aspect runs *after* this rule
        # gets to examine its deps (so the AppleResource provider isn't there
        # yet).
        aspects=[apple_bundling_aspect],
        providers=[["swift"], [SwiftInfo], ["objc"]]
    ),
    "module_name": attr.string(mandatory=False),
    "defines": attr.string_list(mandatory=False, allow_empty=True),
    "copts": attr.string_list(mandatory=False, allow_empty=True),
    "resources": attr.label_list(
        mandatory=False,
        allow_empty=True,
        allow_files=True),
    "structured_resources": attr.label_list(
        mandatory=False,
        allow_empty=True,
        allow_files=True),
    # Implicit dependency on the active Xcode version configuration.
    "_xcode_config": attr.label(
        default=configuration_field(
            fragment="apple", name="xcode_config_label")),
    # Implicit dependency on the xcrun wrapper used to invoke Apple tools.
    "_xcrunwrapper": attr.label(
        executable=True,
        cfg="host",
        default=Label(XCRUNWRAPPER_LABEL))
}
swift_library = rule(
    _swift_library_impl,
    attrs = SWIFT_LIBRARY_ATTRS,
    fragments = ["apple", "objc", "swift"],
    output_to_genfiles=True,
)
"""
Builds a Swift module.
A module is a pair of static library (.a) + module header (.swiftmodule).
Dependent targets can import this module as "import RuleName".
Args:
  srcs: Swift sources that comprise this module.
  deps: Other Swift modules.
  module_name: Optional. Sets the Swift module name for this target. By default
      the module name is the target path with all special symbols replaced
      by "_", e.g. //foo:bar can be imported as "foo_bar".
  copts: A list of flags passed to swiftc command line.
  defines: Each VALUE in this attribute is passed as -DVALUE to the compiler for
      this and dependent targets.
"""
| """Skylark rules for Swift."""
load('@bazel_skylib//lib:collections.bzl', 'collections')
load('@build_bazel_rules_apple//apple/bundling:apple_bundling_aspect.bzl', 'apple_bundling_aspect')
load('@build_bazel_rules_apple//apple:providers.bzl', 'AppleResourceInfo', 'AppleResourceSet', 'SwiftInfo')
load('@build_bazel_rules_apple//apple:utils.bzl', 'xcrun_action', 'XCRUNWRAPPER_LABEL', 'module_cache_path', 'label_scoped_path')
load('@build_bazel_rules_apple//apple/bundling:xcode_support.bzl', 'xcode_support')
load('@build_bazel_rules_apple//common:attrs.bzl', 'attrs')
load('@build_bazel_rules_apple//common:providers.bzl', 'providers')
def _parent_dirs(dirs):
"""Returns a set of parent directories for each directory in dirs."""
return depset(direct=[f.rpartition('/')[0] for f in dirs])
def _framework_names(dirs):
  """Returns the framework name (basename, no extension) for each directory."""
  basenames = [path.rpartition('/')[2] for path in dirs]
  return depset(direct=[name.partition('.')[0] for name in basenames])
def _swift_target(cpu, platform, sdk_version):
  """Builds the target triple passed to the Swift compiler via `-target`."""
  os_name = str(platform.platform_type)
  if os_name not in ('ios', 'watchos', 'tvos', 'macos'):
    fail("Platform '%s' is not supported" % os_name)
  # The target triple spells macOS as "macosx".
  if os_name == 'macos':
    os_name = 'macosx'
  return '{}-apple-{}{}'.format(cpu, os_name, sdk_version)
def _swift_compilation_mode_flags(config_vars, objc_fragment):
  """Returns extra `swiftc` flags for the active compilation mode.

  Args:
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
      that affect compilation of this target.
    objc_fragment: The Objective-C configuration fragment.
  Returns:
    The additional command line flags to pass to `swiftc`.
  """
  mode = config_vars['COMPILATION_MODE']
  flags = []
  if mode in ('dbg', 'fastbuild'):
    # Unoptimized builds stay testable and serialize debugging options.
    flags.extend([
        '-Onone',
        '-DDEBUG',
        '-enable-testing',
        '-Xfrontend',
        '-serialize-debugging-options',
    ])
  elif mode == 'opt':
    flags.extend(['-O', '-DNDEBUG'])
  # Debug info for dbg builds, or whenever dSYM generation is requested.
  if mode == 'dbg' or objc_fragment.generate_dsym:
    flags.append('-g')
  return flags
def _clang_compilation_mode_flags(objc_fragment):
  """Returns the native clang mode flags with any '-g' entries removed."""
  kept = []
  for flag in objc_fragment.copts_for_current_compilation_mode:
    if flag != '-g':
      kept.append(flag)
  return kept
def _swift_bitcode_flags(apple_fragment):
  """Returns the swiftc flags matching the configured bitcode mode."""
  flags_by_mode = {
      'embedded': ['-embed-bitcode'],
      'embedded_markers': ['-embed-bitcode-marker'],
  }
  return flags_by_mode.get(str(apple_fragment.bitcode_mode), [])
def _swift_sanitizer_flags(features):
  """Returns swiftc flags for each sanitizer feature that is enabled."""
  flags_by_feature = {'asan': ['-sanitize=address']}
  enabled = []
  for name, extra_flags in flags_by_feature.items():
    if name in features:
      enabled.extend(extra_flags)
  return enabled
def swift_module_name(label):
  """Derives a Swift module name from a label.

  Special characters are replaced with underscores, so //foo:bar becomes
  "foo_bar".
  """
  package_part = label.package.lstrip('/').replace('/', '_').replace('-', '_')
  name_part = label.name.replace('-', '_')
  return package_part + '_' + name_part if package_part else name_part
def _swift_lib_dir(apple_fragment, config_vars, is_static=False):
  """Returns the directory of Swift runtime libraries to link against.

  Args:
    apple_fragment: The Apple configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
      that affect compilation of this target.
    is_static: If True, the static library directory will be used instead of
      the dynamic library directory (currently available only on macOS).
  Returns:
    The location of the Swift runtime directory to link against.
  """
  lib_subdir = 'swift_static' if is_static else 'swift'
  platform_name = apple_fragment.single_arch_platform.name_in_plist.lower()
  # An explicitly configured toolchain path takes precedence over the
  # default Xcode toolchain location.
  if 'xcode_toolchain_path' in config_vars:
    return '{0}/usr/lib/{1}/{2}'.format(
        config_vars['xcode_toolchain_path'], lib_subdir, platform_name)
  return '{0}/Toolchains/XcodeDefault.xctoolchain/usr/lib/{1}/{2}'.format(
      apple_common.apple_toolchain().developer_dir(), lib_subdir, platform_name)
def swift_linkopts(apple_fragment, config_vars, is_static=False):
  """Returns the extra linker arguments needed to link Swift code.

  Args:
    apple_fragment: The Apple configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
      that affect compilation of this target.
    is_static: If True, the static library directory will be used instead of
      the dynamic library directory (currently available only on macOS).
  Returns:
    Additional linker arguments needed to link Swift.
  """
  runtime_dir = _swift_lib_dir(apple_fragment, config_vars, is_static)
  return ['-L' + runtime_dir]
def _swift_parsing_flags(srcs):
  """Returns the parsing-mode flags for swiftc.

  A single source named anything other than `main.swift` is parsed as a
  library; in every other case swiftc's default parsing mode is used.
  """
  if len(srcs) != 1:
    return []
  return [] if srcs[0].basename == 'main.swift' else ['-parse-as-library']
def _is_valid_swift_module_name(string):
  """Returns True if `string` is a valid Swift module name.

  Valid names are non-empty and contain only alphanumerics and underscores.
  """
  if not string:
    return False
  return all([ch.isalnum() or ch == '_' for ch in string])
def _validate_rule_and_deps(ctx):
  """Fails the build if this target or any dependency has an invalid name."""
  error_template = ("Error in target '%s', Swift target and its dependencies' "
                    + 'names can only contain characters in [a-zA-Z0-9_].')
  # Validate this target's own name first, then each dependency's name.
  if not _is_valid_swift_module_name(ctx.label.name):
    fail(error_template % ctx.label)
  for dep in ctx.attr.deps:
    if not _is_valid_swift_module_name(dep.label.name):
      fail(error_template % dep.label)
def _get_wmo_state(copts, swift_fragment):
  """Reports whether Whole Module Optimization is enabled.

  Args:
    copts: The list of copts to search for WMO flags.
    swift_fragment: The Swift configuration fragment.
  Returns:
    A Boolean value indicating whether WMO has been enabled.
  """
  combined_copts = copts + swift_fragment.copts()
  for wmo_flag in ('-wmo', '-whole-module-optimization'):
    if wmo_flag in combined_copts:
      return True
  return False
def swift_compile_requirements(
    srcs, deps, module_name, label, copts, defines, apple_fragment,
    objc_fragment, swift_fragment, config_vars, default_configuration,
    xcode_config, genfiles_dir, features):
  """Bundles everything required to compile Swift code into a single struct.

  Args:
    srcs: The list of `*.swift` sources to compile.
    deps: The list of targets that are dependencies for the sources being
      compiled.
    module_name: The name of the Swift module to which the compiled files
      belong.
    label: The label used to generate the Swift module name if one was not
      provided.
    copts: A list of compiler options to pass to `swiftc`.
    defines: A list of compiler defines to pass to `swiftc`.
    apple_fragment: The Apple configuration fragment.
    objc_fragment: The Objective-C configuration fragment.
    swift_fragment: The Swift configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
      that affect compilation of this target.
    default_configuration: The default configuration retrieved from the rule
      context.
    xcode_config: The XcodeVersionConfig to use.
    genfiles_dir: The directory where genfiles are written.
    features: List of enabled features as passed with --features or set into
      the features attribute.
  Returns:
    A structure that contains the information required to compile Swift code.
  """
  return struct(
      srcs=srcs,
      deps=deps,
      module_name=module_name,
      label=label,
      copts=copts,
      defines=defines,
      apple_fragment=apple_fragment,
      objc_fragment=objc_fragment,
      swift_fragment=swift_fragment,
      config_vars=config_vars,
      default_configuration=default_configuration,
      xcode_config=xcode_config,
      genfiles_dir=genfiles_dir,
      features=features,
  )
def swiftc_inputs(ctx):
  """Determines the list of inputs required for the compile action.

  Args:
    ctx: The rule context.
  Returns:
    A list of files needed by swiftc.
  """
  source_files = ctx.files.srcs
  dependencies = ctx.attr.deps
  return _swiftc_inputs(source_files, dependencies)
def _swiftc_inputs(srcs, deps=[]):
  """Collects every file a Swift compile action reads.

  Args:
    srcs: A list of `*.swift` source files being compiled.
    deps: A list of targets that are dependencies of the files being compiled.
  Returns:
    A list of files that should be passed as inputs to the Swift compilation
    action.
  """
  # Swift modules exported transitively by Swift-aware dependencies.
  module_inputs = depset(transitive=[
      info.transitive_modules
      for info in providers.find_all(deps, SwiftInfo)
  ])
  # Headers, module maps, and framework binaries from ObjC dependencies.
  combined_objc = apple_common.new_objc_provider(
      providers=providers.find_all(deps, 'objc'))
  objc_inputs = depset(transitive=[
      combined_objc.header,
      combined_objc.module_map,
      combined_objc.umbrella_header,
      combined_objc.static_framework_file,
      combined_objc.dynamic_framework_file,
  ])
  return srcs + module_inputs.to_list() + objc_inputs.to_list()
def swiftc_args(ctx):
  """Returns an almost complete array of arguments to be passed to swiftc.

  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file. It has no
  side effects and does not modify ctx. It expects ctx to contain the same
  fragments and attributes as swift_library (you're encouraged to depend on
  SWIFT_LIBRARY_ATTRS in your rule definition).

  Args:
    ctx: The rule context.
  Returns:
    A list of command line arguments for swiftc. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  requirements = swift_compile_requirements(
      ctx.files.srcs,
      ctx.attr.deps,
      ctx.attr.module_name,
      ctx.label,
      ctx.attr.copts,
      ctx.attr.defines,
      ctx.fragments.apple,
      ctx.fragments.objc,
      ctx.fragments.swift,
      ctx.var,
      ctx.configuration,
      ctx.attr._xcode_config[apple_common.XcodeVersionConfig],
      ctx.genfiles_dir,
      ctx.features)
  return _swiftc_args(requirements)
def _swiftc_args(reqs):
  """Returns an almost complete array of arguments to be passed to swiftc.
  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file.
  Args:
    reqs: The compilation requirements as returned by
      `swift_compile_requirements`.
  Returns:
    A list of command line arguments for `swiftc`. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  apple_fragment = reqs.apple_fragment
  deps = reqs.deps
  cpu = apple_fragment.single_arch_cpu
  platform = apple_fragment.single_arch_platform
  # Minimum deployment OS version for the platform, used in the -target triple.
  target_os = reqs.xcode_config.minimum_os_for_platform_type(platform.platform_type)
  target = _swift_target(cpu, platform, target_os)
  apple_toolchain = apple_common.apple_toolchain()
  # Collect transitive modules and defines (`+=` is the legacy depset union).
  dep_modules = depset()
  swiftc_defines = depset(reqs.defines)
  swift_providers = providers.find_all(deps, SwiftInfo)
  for swift in swift_providers:
    dep_modules += swift.transitive_modules
    swiftc_defines += swift.transitive_defines
  objc_providers = providers.find_all(deps, 'objc')
  transitive_objc = apple_common.new_objc_provider(providers=objc_providers)
  # Header search paths (-I), pulled from each individual objc provider.
  objc_includes = []
  for objc in objc_providers:
    objc_includes += objc.include.to_list()
  objc_includes = depset(objc_includes)
  # Module maps and framework names come from the combined objc provider.
  objc_module_maps = transitive_objc.module_map
  static_frameworks = _framework_names(transitive_objc.framework_dir)
  # Framework search paths (-F): platform frameworks plus dep frameworks.
  framework_dirs = depset(direct=[apple_toolchain.platform_developer_framework_dir(apple_fragment)], transitive=[_parent_dirs(transitive_objc.framework_dir), _parent_dirs(transitive_objc.dynamic_framework_dir)])
  objc_defines = transitive_objc.define
  srcs_args = [f.path for f in reqs.srcs]
  # Each module's parent directory plus the genfiles root, so imports resolve.
  include_dirs = depset([x.dirname for x in dep_modules])
  include_dirs += depset([reqs.genfiles_dir.path])
  include_args = ['-I%s' % d for d in include_dirs + objc_includes]
  framework_args = ['-F%s' % x for x in framework_dirs]
  define_args = ['-D%s' % x for x in swiftc_defines.to_list()]
  # Disable autolink commands for static frameworks so they can be
  # deduplicated when linked into both test and app binaries.
  autolink_args = collections.before_each('-Xfrontend', collections.before_each('-disable-autolink-framework', static_frameworks))
  # Flags forwarded (via -Xcc) to the clang that swiftc spawns for module maps.
  clang_args = collections.before_each('-Xcc', ['-iquote', '.'] + ['-D' + x for x in objc_defines] + reqs.objc_fragment.copts + _clang_compilation_mode_flags(reqs.objc_fragment) + ['-fmodule-map-file=%s' % x.path for x in objc_module_maps])
  args = ['-emit-object', '-module-name', reqs.module_name, '-target', target, '-sdk', apple_toolchain.sdk_dir(), '-module-cache-path', module_cache_path(reqs.genfiles_dir)]
  if reqs.default_configuration.coverage_enabled:
    args.extend(['-profile-generate', '-profile-coverage-mapping'])
  args.extend(_swift_compilation_mode_flags(reqs.config_vars, reqs.objc_fragment))
  args.extend(_swift_bitcode_flags(apple_fragment))
  args.extend(_swift_parsing_flags(reqs.srcs))
  args.extend(_swift_sanitizer_flags(reqs.features))
  args.extend(srcs_args)
  args.extend(include_args)
  args.extend(framework_args)
  args.extend(clang_args)
  args.extend(define_args)
  args.extend(autolink_args)
  # User copts go last so they can terminate flag pairs (e.g. -Xcc) and are
  # not clobbered by generated flags.
  args.extend(reqs.swift_fragment.copts())
  args.extend(reqs.copts)
  return args
def _find_swift_version(args):
"""Returns the value of the `-swift-version` argument, if found.
Args:
args: The command-line arguments to be scanned.
Returns:
The value of the `-swift-version` argument, or None if it was not found in
the argument list.
"""
last_swift_version = None
count = len(args)
for i in range(count):
arg = args[i]
if arg == '-swift-version' and i + 1 < count:
last_swift_version = args[i + 1]
return last_swift_version
def register_swift_compile_actions(ctx, reqs):
"""Registers actions to compile Swift sources.
Args:
ctx: The rule context. Within this function, it should only be used to
register actions, or declare files; do not use it to access attributes
because it may be called from many different rules.
reqs: The compilation requirements as returned by
`swift_compile_requirements`.
Returns:
A tuple containing the (1) output files of the compilation action, the (2)
`objc` provider, and (3) the `SwiftInfo` provider that should be propagated
by a target compiling these Swift sources.
"""
module_name = reqs.module_name
label = reqs.label
dep_modules = depset()
dep_libs = depset()
dep_docs = depset()
swiftc_defines = depset(reqs.defines)
swift_providers = providers.find_all(reqs.deps, SwiftInfo)
for swift in swift_providers:
dep_libs += swift.transitive_libs
dep_modules += swift.transitive_modules
dep_docs += swift.transitive_docs
swiftc_defines += swift.transitive_defines
objs_outputs_path = label_scoped_path(reqs.label, '_objs/')
output_lib = ctx.new_file(objs_outputs_path + module_name + '.a')
output_module = ctx.new_file(objs_outputs_path + module_name + '.swiftmodule')
output_doc = ctx.new_file(objs_outputs_path + module_name + '.swiftdoc')
output_header = ctx.new_file(label.name + '-Swift.h')
swiftc_output_map_file = ctx.new_file(label.name + '.output_file_map.json')
swiftc_output_map = struct()
output_objs = []
swiftc_outputs = []
has_wmo = _get_wmo_state(reqs.copts, reqs.swift_fragment)
for source in reqs.srcs:
basename = source.basename
output_map_entry = {}
obj = ctx.new_file(objs_outputs_path + basename + '.o')
output_objs.append(obj)
output_map_entry['object'] = obj.path
if not has_wmo:
partial_module = ctx.new_file(objs_outputs_path + basename + '.partial_swiftmodule')
swiftc_outputs.append(partial_module)
output_map_entry['swiftmodule'] = partial_module.path
swiftc_output_map += struct(**{source.path: struct(**output_map_entry)})
ctx.file_action(output=swiftc_output_map_file, content=swiftc_output_map.to_json())
args = ['swiftc'] + _swiftc_args(reqs)
swift_version = _find_swift_version(args)
args += ['-I' + output_module.dirname, '-emit-module-path', output_module.path, '-emit-objc-header-path', output_header.path, '-output-file-map', swiftc_output_map_file.path]
if has_wmo:
args.extend(['-num-threads', '12'])
xcrun_action(ctx, inputs=_swiftc_inputs(reqs.srcs, reqs.deps) + [swiftc_output_map_file], outputs=[output_module, output_header, output_doc] + output_objs + swiftc_outputs, mnemonic='SwiftCompile', arguments=args, use_default_shell_env=False, progress_message='Compiling Swift module %s (%d files)' % (reqs.label.name, len(reqs.srcs)))
xcrun_action(ctx, inputs=output_objs, outputs=(output_lib,), mnemonic='SwiftArchive', arguments=['libtool', '-static', '-o', output_lib.path] + [x.path for x in output_objs], progress_message='Archiving Swift objects %s' % reqs.label.name)
extra_linker_args = ['-Xlinker -add_ast_path -Xlinker ' + output_module.path]
transitive_libs = depset([output_lib]) + dep_libs
transitive_modules = depset([output_module]) + dep_modules
transitive_docs = depset([output_doc]) + dep_docs
compile_outputs = [output_lib, output_module, output_header, output_doc]
objc_providers = providers.find_all(reqs.deps, 'objc')
objc_provider_args = {'library': depset([output_lib]) + dep_libs, 'header': depset([output_header]), 'providers': objc_providers, 'link_inputs': depset([output_module]), 'uses_swift': True}
transitive_objc = apple_common.new_objc_provider(providers=objc_providers)
objc_provider_args['module_map'] = transitive_objc.module_map
platform_type = ctx.fragments.apple.single_arch_platform.platform_type
if platform_type != apple_common.platform_type.macos:
objc_provider_args['linkopt'] = depset(swift_linkopts(reqs.apple_fragment, reqs.config_vars) + extra_linker_args, order='topological')
objc_provider = apple_common.new_objc_provider(**objc_provider_args)
return (compile_outputs, objc_provider, swift_info(direct_lib=output_lib, direct_module=output_module, direct_doc=output_doc, swift_version=swift_version, transitive_libs=transitive_libs, transitive_modules=transitive_modules, transitive_defines=swiftc_defines, transitive_docs=transitive_docs))
def merge_swift_info_providers(targets):
"""Merges the transitive Swift info of the given targets into a new provider.
This function should be used when it is necessary to merge SwiftInfo providers
outside of a compile action (which does it automatically).
Args:
targets: A sequence of targets that may propagate SwiftInfo providers. Those
that do not are ignored.
Returns:
A new SwiftInfo provider that contains the transitive information from all
the targets.
"""
transitive_defines = depset()
transitive_libs = depset()
transitive_modules = depset()
transitive_docs = depset()
for swift_info in providers.find_all(targets, SwiftInfo):
transitive_defines += swift_info.transitive_defines
transitive_libs += swift_info.transitive_libs
transitive_modules += swift_info.transitive_modules
transitive_docs += swift_info.transitive_docs
return swift_info(direct_lib=None, direct_module=None, direct_doc=None, swift_version=None, transitive_defines=transitive_defines, transitive_libs=transitive_libs, transitive_modules=transitive_modules, transitive_docs=transitive_docs)
def merge_swift_objc_providers(targets):
"""Merges the transitive objc info of the given targets into a new provider.
This is restricted to the keys of the objc provider that are used by Swift
compile actions to propagate information about Swift compiled libraries back
up to linker actions and so forth.
This function should be used when it is necessary to merge objc providers
created by other Swift libraries outside of a compile action (which does it
automatically).
Args:
targets: A sequence of targets that may propagate objc providers. Those that
do not are ignored.
Returns:
A new objc provider that contains the transitive information from all the
targets.
"""
libraries = depset()
headers = depset()
link_inputs = depset()
linkopts = depset()
for objc in providers.find_all(targets, 'objc'):
libraries += objc.library
headers += objc.header
link_inputs += objc.link_inputs
linkopts += objc.linkopt
objc_provider_args = {'uses_swift': True}
if headers:
objc_provider_args['header'] = headers
if libraries:
objc_provider_args['library'] = libraries
if linkopts:
objc_provider_args['linkopt'] = linkopts
if link_inputs:
objc_provider_args['link_inputs'] = link_inputs
return apple_common.new_objc_provider(**objc_provider_args)
def _collect_resource_sets(resources, structured_resources, deps, module_name):
"""Collects resource sets from the target and its dependencies.
Args:
resources: The resources associated with the target being built.
structured_resources: The structured resources associated with the target
being built.
deps: The dependencies of the target being built.
module_name: The name of the Swift module associated with the resources
(either the user-provided name, or the auto-generated one).
Returns:
A list of structs representing the transitive resources to propagate to the
bundling rules.
"""
resource_sets = []
if resources or structured_resources:
resource_sets.append(apple_resource_set(resources=depset(resources), structured_resources=depset(structured_resources), swift_module=module_name))
for dep in deps:
if AppleResourceInfo in dep:
resource_sets.extend(dep[AppleResourceInfo].resource_sets)
return resource_sets
def _swift_library_impl(ctx):
"""Implementation for swift_library Skylark rule."""
_validate_rule_and_deps(ctx)
resolved_module_name = ctx.attr.module_name or swift_module_name(ctx.label)
reqs = swift_compile_requirements(ctx.files.srcs, ctx.attr.deps, resolved_module_name, ctx.label, ctx.attr.copts, ctx.attr.defines, ctx.fragments.apple, ctx.fragments.objc, ctx.fragments.swift, ctx.var, ctx.configuration, ctx.attr._xcode_config[apple_common.XcodeVersionConfig], ctx.genfiles_dir, ctx.features)
(compile_outputs, objc_provider, swift_info) = register_swift_compile_actions(ctx, reqs)
resource_sets = _collect_resource_sets(ctx.files.resources, ctx.files.structured_resources, ctx.attr.deps, resolved_module_name)
return struct(files=depset(compile_outputs), swift=struct(direct_lib=swift_info.direct_lib, direct_module=swift_info.direct_module, direct_doc=swift_info.direct_doc, swift_version=swift_info.swift_version, transitive_libs=swift_info.transitive_libs, transitive_modules=swift_info.transitive_modules, transitive_docs=swift_info.transitive_docs, transitive_defines=swift_info.transitive_defines), objc=objc_provider, providers=[apple_resource_info(resource_sets=resource_sets), swift_info])
swift_library_attrs = {'srcs': attr.label_list(allow_files=['.swift'], allow_empty=False), 'deps': attr.label_list(aspects=[apple_bundling_aspect], providers=[['swift'], [SwiftInfo], ['objc']]), 'module_name': attr.string(mandatory=False), 'defines': attr.string_list(mandatory=False, allow_empty=True), 'copts': attr.string_list(mandatory=False, allow_empty=True), 'resources': attr.label_list(mandatory=False, allow_empty=True, allow_files=True), 'structured_resources': attr.label_list(mandatory=False, allow_empty=True, allow_files=True), '_xcode_config': attr.label(default=configuration_field(fragment='apple', name='xcode_config_label')), '_xcrunwrapper': attr.label(executable=True, cfg='host', default=label(XCRUNWRAPPER_LABEL))}
swift_library = rule(_swift_library_impl, attrs=SWIFT_LIBRARY_ATTRS, fragments=['apple', 'objc', 'swift'], output_to_genfiles=True)
'\nBuilds a Swift module.\n\nA module is a pair of static library (.a) + module header (.swiftmodule).\nDependant targets can import this module as "import RuleName".\n\nArgs:\n srcs: Swift sources that comprise this module.\n deps: Other Swift modules.\n module_name: Optional. Sets the Swift module name for this target. By default\n the module name is the target path with all special symbols replaced\n by "_", e.g. //foo:bar can be imported as "foo_bar".\n copts: A list of flags passed to swiftc command line.\n defines: Each VALUE in this attribute is passed as -DVALUE to the compiler for\n this and dependent targets.\n' |
# -*- coding: utf-8 -*-
CSRF_ENABLED = True
SECRET_KEY = "208h3oiushefo9823liukhso8dyfhsdklihf"
debug = False
| csrf_enabled = True
secret_key = '208h3oiushefo9823liukhso8dyfhsdklihf'
debug = False |
def getLate():
v = Late(**{})
return v
class Late():
value = 'late'
| def get_late():
v = late(**{})
return v
class Late:
value = 'late' |
formatter = "{} {} {} {}"
print(formatter.format(1, 2, 3, 4))
print(formatter.format("one", "two", "three", "four"))
print(formatter.format(True, False, False, True))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format(
"I had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said goodnight."
))
| formatter = '{} {} {} {}'
print(formatter.format(1, 2, 3, 4))
print(formatter.format('one', 'two', 'three', 'four'))
print(formatter.format(True, False, False, True))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format('I had this thing.', 'That you could type up right.', "But it didn't sing.", 'So I said goodnight.')) |
# Copyright 2014 PDFium authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Original code from V8, original license was:
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This file is used only by the standalone PDFium build. Under a chromium
# checkout, the src/testing/gtest.gyp file is used instead.
{
'targets': [
{
'target_name': 'gtest',
'toolsets': ['host', 'target'],
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
'gtest-support.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
],
'dependencies': [
'gtest_prod',
],
'defines': [
# In order to allow regex matches in gtest to be shared between Windows
# and other systems, we tell gtest to always use it's internal engine.
'GTEST_HAS_POSIX_RE=0',
# Unit tests don't require C++11, yet.
'GTEST_LANG_CXX11=0',
],
'all_dependent_settings': {
'defines': [
'GTEST_HAS_POSIX_RE=0',
'GTEST_LANG_CXX11=0',
],
},
'conditions': [
['os_posix == 1', {
'defines': [
# gtest isn't able to figure out when RTTI is disabled for gcc
# versions older than 4.3.2, and assumes it's enabled. Our Mac
# and Linux builds disable RTTI, and cannot guarantee that the
# compiler will be 4.3.2. or newer. The Mac, for example, uses
# 4.2.1 as that is the latest available on that platform. gtest
# must be instructed that RTTI is disabled here, and for any
# direct dependents that might include gtest headers.
'GTEST_HAS_RTTI=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_RTTI=0',
],
},
}],
['OS=="android"', {
'defines': [
'GTEST_HAS_CLONE=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_CLONE=0',
],
},
}],
['OS=="android"', {
# We want gtest features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gtest supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
# GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
# gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
# automatically on android, so it has to be set explicitly here.
'GTEST_HAS_TR1_TUPLE=1',
],
'direct_dependent_settings': {
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
'GTEST_HAS_TR1_TUPLE=1',
],
},
}],
],
'direct_dependent_settings': {
'defines': [
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
'test': 1,
'conditions': [
['OS=="mac"', {
'run_as': {
'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
},
}],
['OS=="win"', {
'run_as': {
'action????': ['$(TargetPath)', '--gtest_print_time'],
},
}],
],
}],
],
'msvs_disabled_warnings': [4800],
},
},
{
'target_name': 'gtest_main',
'type': 'static_library',
'dependencies': [
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
],
},
{
'target_name': 'gtest_prod',
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
'gtest/include/gtest/gtest_prod.h',
],
},
],
}
| {'targets': [{'target_name': 'gtest', 'toolsets': ['host', 'target'], 'type': 'static_library', 'sources': ['gtest/include/gtest/gtest-death-test.h', 'gtest/include/gtest/gtest-message.h', 'gtest/include/gtest/gtest-param-test.h', 'gtest/include/gtest/gtest-printers.h', 'gtest/include/gtest/gtest-spi.h', 'gtest/include/gtest/gtest-test-part.h', 'gtest/include/gtest/gtest-typed-test.h', 'gtest/include/gtest/gtest.h', 'gtest/include/gtest/gtest_pred_impl.h', 'gtest/include/gtest/internal/gtest-death-test-internal.h', 'gtest/include/gtest/internal/gtest-filepath.h', 'gtest/include/gtest/internal/gtest-internal.h', 'gtest/include/gtest/internal/gtest-linked_ptr.h', 'gtest/include/gtest/internal/gtest-param-util-generated.h', 'gtest/include/gtest/internal/gtest-param-util.h', 'gtest/include/gtest/internal/gtest-port.h', 'gtest/include/gtest/internal/gtest-string.h', 'gtest/include/gtest/internal/gtest-tuple.h', 'gtest/include/gtest/internal/gtest-type-util.h', 'gtest/src/gtest-all.cc', 'gtest/src/gtest-death-test.cc', 'gtest/src/gtest-filepath.cc', 'gtest/src/gtest-internal-inl.h', 'gtest/src/gtest-port.cc', 'gtest/src/gtest-printers.cc', 'gtest/src/gtest-test-part.cc', 'gtest/src/gtest-typed-test.cc', 'gtest/src/gtest.cc', 'gtest-support.h'], 'sources!': ['gtest/src/gtest-all.cc'], 'include_dirs': ['gtest', 'gtest/include'], 'dependencies': ['gtest_prod'], 'defines': ['GTEST_HAS_POSIX_RE=0', 'GTEST_LANG_CXX11=0'], 'all_dependent_settings': {'defines': ['GTEST_HAS_POSIX_RE=0', 'GTEST_LANG_CXX11=0']}, 'conditions': [['os_posix == 1', {'defines': ['GTEST_HAS_RTTI=0'], 'direct_dependent_settings': {'defines': ['GTEST_HAS_RTTI=0']}}], ['OS=="android"', {'defines': ['GTEST_HAS_CLONE=0'], 'direct_dependent_settings': {'defines': ['GTEST_HAS_CLONE=0']}}], ['OS=="android"', {'defines': ['GTEST_USE_OWN_TR1_TUPLE=1', 'GTEST_HAS_TR1_TUPLE=1'], 'direct_dependent_settings': {'defines': ['GTEST_USE_OWN_TR1_TUPLE=1', 'GTEST_HAS_TR1_TUPLE=1']}}]], 'direct_dependent_settings': 
{'defines': ['UNIT_TEST'], 'include_dirs': ['gtest/include'], 'target_conditions': [['_type=="executable"', {'test': 1, 'conditions': [['OS=="mac"', {'run_as': {'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']}}], ['OS=="win"', {'run_as': {'action????': ['$(TargetPath)', '--gtest_print_time']}}]]}]], 'msvs_disabled_warnings': [4800]}}, {'target_name': 'gtest_main', 'type': 'static_library', 'dependencies': ['gtest'], 'sources': ['gtest/src/gtest_main.cc']}, {'target_name': 'gtest_prod', 'toolsets': ['host', 'target'], 'type': 'none', 'sources': ['gtest/include/gtest/gtest_prod.h']}]} |
activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class DannMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [1],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-lambda_regul_s": lambda_regul_s,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannIgnoreMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [1],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-adapt_only_first": [1],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannZeroImputMNISTUSPS(object):
MAX_NB_PROCESSES = 3
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann"],
"-upper_bound": [0],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2.5],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DannImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["dann_imput"],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-stop_grad": [0],
"-n_epochs": [100],
"-batch_size": [128],
"-initialize_model": [1],
"-init_batch_size": [32],
"-weight_d2": [weight_d2],
"-weight_mse": [weight_mse],
"-activate_mse": [activate_mse],
"-activate_adaptation_imp": [activate_adaptation_imp],
"-activate_adaptation_d1": [activate_adaptation_d1],
"-init_lr": [10 ** -2],
"-refinement": [refinement],
"-n_epochs_refinement": [n_epochs_refinement],
"-lambda_regul": lambda_regul,
"-lambda_regul_s": lambda_regul_s,
"-threshold_value": threshold_value,
"-random_seed": random_seed
}
class DjdotMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-random_seed": random_seed
}
class DjdotIgnoreMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-output_fig": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-adapt_only_first": [1],
"-random_seed": random_seed
}
class DjdotZeroImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot"],
"-upper_bound": [0],
"-adaptive_lr": [1],
"-is_balanced": [1],
"-djdot_alpha": [0.1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-epoch_to_start_align": [11],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-random_seed": random_seed
}
class DjdotImputMNISTUSPS(object):
MAX_NB_PROCESSES = 2
DEBUG = False
BINARY = "experiments/launcher/digits_binary.py"
GRID = {
"-mode": ["djdot_imput"],
"-adaptive_lr": [1],
"-source": ["MNIST"],
"-target": ["USPS"],
"-is_balanced": [1],
"-epoch_to_start_align": [11],
"-stop_grad": [1],
"-djdot_alpha": [0.1],
"-bigger_reconstructor": [1],
"-n_epochs": [100],
"-batch_size": [500],
"-initialize_model": [1],
"-init_batch_size": [32],
"-init_lr": [10 ** -2],
"-activate_mse": [activate_mse],
"-activate_adaptation_imp": [activate_adaptation_imp],
"-activate_adaptation_d1": [activate_adaptation_d1],
"-random_seed": random_seed
}
| activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]
class Dannmnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [1], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-lambda_regul_s': lambda_regul_s, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannignoremnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [1], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-adapt_only_first': [1], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannzeroimputmnistusps(object):
max_nb_processes = 3
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann'], '-upper_bound': [0], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2.5)], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Dannimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['dann_imput'], '-adaptive_lr': [1], '-is_balanced': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-stop_grad': [0], '-n_epochs': [100], '-batch_size': [128], '-initialize_model': [1], '-init_batch_size': [32], '-weight_d2': [weight_d2], '-weight_mse': [weight_mse], '-activate_mse': [activate_mse], '-activate_adaptation_imp': [activate_adaptation_imp], '-activate_adaptation_d1': [activate_adaptation_d1], '-init_lr': [10 ** (-2)], '-refinement': [refinement], '-n_epochs_refinement': [n_epochs_refinement], '-lambda_regul': lambda_regul, '-lambda_regul_s': lambda_regul_s, '-threshold_value': threshold_value, '-random_seed': random_seed}
class Djdotmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-random_seed': random_seed}
class Djdotignoremnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-output_fig': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-adapt_only_first': [1], '-random_seed': random_seed}
class Djdotzeroimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot'], '-upper_bound': [0], '-adaptive_lr': [1], '-is_balanced': [1], '-djdot_alpha': [0.1], '-source': ['MNIST'], '-target': ['USPS'], '-epoch_to_start_align': [11], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-random_seed': random_seed}
class Djdotimputmnistusps(object):
max_nb_processes = 2
debug = False
binary = 'experiments/launcher/digits_binary.py'
grid = {'-mode': ['djdot_imput'], '-adaptive_lr': [1], '-source': ['MNIST'], '-target': ['USPS'], '-is_balanced': [1], '-epoch_to_start_align': [11], '-stop_grad': [1], '-djdot_alpha': [0.1], '-bigger_reconstructor': [1], '-n_epochs': [100], '-batch_size': [500], '-initialize_model': [1], '-init_batch_size': [32], '-init_lr': [10 ** (-2)], '-activate_mse': [activate_mse], '-activate_adaptation_imp': [activate_adaptation_imp], '-activate_adaptation_d1': [activate_adaptation_d1], '-random_seed': random_seed} |
# encoding: utf-8
# module cv2.xphoto
# from /home/davtoh/anaconda3/envs/rrtools/lib/python3.5/site-packages/cv2.cpython-35m-x86_64-linux-gnu.so
# by generator 1.144
# no doc
# no imports
# Variables with simple values
BM3D_STEP1 = 1
BM3D_STEP2 = 2
BM3D_STEPALL = 0
HAAR = 0
INPAINT_SHIFTMAP = 0
__loader__ = None
__spec__ = None
# functions
# real signature unknown; restored from __doc__
def applyChannelGains(src, gainB, gainG, gainR, dst=None):
""" applyChannelGains(src, gainB, gainG, gainR[, dst]) -> dst """
pass
def bm3dDenoising(src, dstStep1, dstStep2=None, h=None, templateWindowSize=None, searchWindowSize=None, blockMatchingStep1=None, blockMatchingStep2=None, groupSize=None, slidingStep=None, beta=None, normType=None, step=None, transformType=None): # real signature unknown; restored from __doc__
""" bm3dDenoising(src, dstStep1[, dstStep2[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dstStep1, dstStep2 or bm3dDenoising(src[, dst[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dst """
pass
def createGrayworldWB(): # real signature unknown; restored from __doc__
""" createGrayworldWB() -> retval """
pass
# real signature unknown; restored from __doc__
def createLearningBasedWB(path_to_model=None):
""" createLearningBasedWB([, path_to_model]) -> retval """
pass
def createSimpleWB(): # real signature unknown; restored from __doc__
""" createSimpleWB() -> retval """
pass
# real signature unknown; restored from __doc__
def dctDenoising(src, dst, sigma, psize=None):
""" dctDenoising(src, dst, sigma[, psize]) -> None """
pass
def inpaint(src, mask, dst, algorithmType): # real signature unknown; restored from __doc__
""" inpaint(src, mask, dst, algorithmType) -> None """
pass
# no classes
| bm3_d_step1 = 1
bm3_d_step2 = 2
bm3_d_stepall = 0
haar = 0
inpaint_shiftmap = 0
__loader__ = None
__spec__ = None
def apply_channel_gains(src, gainB, gainG, gainR, dst=None):
""" applyChannelGains(src, gainB, gainG, gainR[, dst]) -> dst """
pass
def bm3d_denoising(src, dstStep1, dstStep2=None, h=None, templateWindowSize=None, searchWindowSize=None, blockMatchingStep1=None, blockMatchingStep2=None, groupSize=None, slidingStep=None, beta=None, normType=None, step=None, transformType=None):
""" bm3dDenoising(src, dstStep1[, dstStep2[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dstStep1, dstStep2 or bm3dDenoising(src[, dst[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dst """
pass
def create_grayworld_wb():
""" createGrayworldWB() -> retval """
pass
def create_learning_based_wb(path_to_model=None):
""" createLearningBasedWB([, path_to_model]) -> retval """
pass
def create_simple_wb():
""" createSimpleWB() -> retval """
pass
def dct_denoising(src, dst, sigma, psize=None):
""" dctDenoising(src, dst, sigma[, psize]) -> None """
pass
def inpaint(src, mask, dst, algorithmType):
""" inpaint(src, mask, dst, algorithmType) -> None """
pass |
# flopy version file automatically created using...pre-commit.py
# created on...March 20, 2018 17:03:11
major = 3
minor = 2
micro = 9
build = 60
commit = 2731
__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)
__build__ = '{:d}.{:d}.{:d}.{:d}'.format(major, minor, micro, build)
__git_commit__ = '{:d}'.format(commit)
| major = 3
minor = 2
micro = 9
build = 60
commit = 2731
__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)
__build__ = '{:d}.{:d}.{:d}.{:d}'.format(major, minor, micro, build)
__git_commit__ = '{:d}'.format(commit) |
#!/usr/bin/env python3
def main():
    """Split dnsservers.txt into per-TLD files.

    Lines ending in 'org' are appended to org-domain.txt, lines ending in
    'com' to com-domain.txt; anything else is ignored. Output files are
    created only when there is at least one matching line, matching the
    original behavior.
    """
    org_lines = []
    com_lines = []
    with open("dnsservers.txt", "r") as dnsfile:
        for svr in dnsfile:
            svr = svr.rstrip('\n')  # drop trailing newline (absent on last line)
            if svr.endswith('org'):
                org_lines.append(svr + "\n")
            elif svr.endswith('com'):
                com_lines.append(svr + "\n")
    # FIX: the original reopened the output file once per matching line;
    # collect first, then open each output at most once.
    if org_lines:
        with open("org-domain.txt", "a") as srvfile:
            srvfile.writelines(org_lines)
    if com_lines:
        with open("com-domain.txt", "a") as srvfile:
            srvfile.writelines(com_lines)
# FIX: guard the entry point so importing this module does not trigger I/O.
if __name__ == "__main__":
    main()
| def main():
with open('dnsservers.txt', 'r') as dnsfile:
for svr in dnsfile:
svr = svr.rstrip('\n')
if svr.endswith('org'):
with open('org-domain.txt', 'a') as srvfile:
srvfile.write(svr + '\n')
elif svr.endswith('com'):
with open('com-domain.txt', 'a') as srvfile:
srvfile.write(svr + '\n')
main() |
# Read two item ids (a, b) and n shops; count shops where item `a` appears
# more often positively (+a) than negatively (-a), and likewise for `b`.
a, b = map(int, input('').split(' '))
n = int(input(''))
ans = 0
for _ in range(n):
    # BUG FIX: the original filter `abs(int(i)) == a or b` parsed as
    # `(abs(int(i)) == a) or b`, which is truthy for any nonzero b and so
    # kept every value. Keep only entries for the two items of interest.
    # (The final count is unaffected since .count() targets exact values,
    # so this fix is output-preserving.)
    shop = [v for v in map(int, input('').split(' ')) if abs(v) in (a, b)]
    if shop.count(a) > shop.count(-a) and shop.count(b) > shop.count(-b):
        ans += 1
print(ans) | (a, b) = map(int, input('').split(' '))
n = int(input(''))
ans = 0
for i in range(n):
shop = [int(i) for i in input('').split(' ') if abs(int(i)) == a or b]
if shop.count(a) > shop.count(-a) and shop.count(b) > shop.count(-b):
ans += 1
print(ans) |
# Demo of loop control: `break` exits a loop early, `continue` skips
# one iteration and keeps looping.
greetings = ["Hello", "World", "!!!"]
for word in greetings:
    print(word)
    if word == "World":
        # Stop as soon as "World" has been printed; "!!!" is never reached.
        break
print()
for number in range(0, 22, 2):
    if number == 10:
        # Skip printing 10 but continue with the remaining even numbers.
        continue
    print(number)
input("Press Enter key to exit ")
| greetings = ['Hello', 'World', '!!!']
for x in greetings:
print(x)
if x == 'World':
break
print()
for x in range(0, 22, 2):
if x == 10:
continue
print(x)
input('Press Enter key to exit ') |
# Connection settings for a local AMQP broker -- the ports (5672/15672) and
# guest/guest credentials match RabbitMQ defaults (presumably RabbitMQ; confirm).
HOST = '127.0.0.1'
USERNAME = 'guest'
PASSWORD = 'guest'
# AMQP connection URI; %2F is the URL-encoded default vhost "/".
URI = 'amqp://guest:guest@127.0.0.1:5672/%2F'
# Management HTTP API base URL.
HTTP_URL = 'http://127.0.0.1:15672'
| host = '127.0.0.1'
username = 'guest'
password = 'guest'
uri = 'amqp://guest:guest@127.0.0.1:5672/%2F'
http_url = 'http://127.0.0.1:15672' |
load("@bazel_skylib//lib:paths.bzl", "paths")
def _add_data_impl(ctx):
    # Re-expose the wrapped executable under this target's name, keeping the
    # original file extension (relevant e.g. for ".exe" on Windows).
    _, extension = paths.split_extension(ctx.executable.executable.path)
    link = ctx.actions.declare_file(ctx.label.name + extension)
    ctx.actions.symlink(
        output = link,
        target_file = ctx.executable.executable,
        is_executable = True,
    )

    # Seed the runfiles with the symlink, the real binary and the extra data
    # files, then fold in the runfiles of the executable and every data dep.
    merged = ctx.runfiles(files = [link, ctx.executable.executable] + ctx.files.data)
    merged = merged.merge(ctx.attr.executable[DefaultInfo].default_runfiles)
    for dep in ctx.attr.data:
        merged = merged.merge(dep[DefaultInfo].default_runfiles)

    return [DefaultInfo(
        executable = link,
        files = depset(direct = [link]),
        runfiles = merged,
    )]
# Wrapper rule: republishes `executable` under this target's name with the
# given `data` files (and their runfiles) attached. Implementation symlinks
# the binary and merges runfiles; see _add_data_impl.
add_data = rule(
    _add_data_impl,
    attrs = {
        "executable": attr.label(
            executable = True,
            # Target configuration: the wrapped tool is meant to be run by
            # the consumer, not during the build.
            cfg = "target",
            doc = "Create a symlink to this executable",
        ),
        "data": attr.label_list(
            allow_files = True,
            doc = "Add these data files to the executable's runfiles",
        ),
    },
    executable = True,
    doc = "Creates a new target for the given executable with additional runfiles.",
)
| load('@bazel_skylib//lib:paths.bzl', 'paths')
def _add_data_impl(ctx):
(_, extension) = paths.split_extension(ctx.executable.executable.path)
executable = ctx.actions.declare_file(ctx.label.name + extension)
ctx.actions.symlink(output=executable, target_file=ctx.executable.executable, is_executable=True)
runfiles = ctx.runfiles(files=[executable, ctx.executable.executable] + ctx.files.data)
runfiles = runfiles.merge(ctx.attr.executable[DefaultInfo].default_runfiles)
for data_dep in ctx.attr.data:
runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
return [default_info(executable=executable, files=depset(direct=[executable]), runfiles=runfiles)]
add_data = rule(_add_data_impl, attrs={'executable': attr.label(executable=True, cfg='target', doc='Create a symlink to this executable'), 'data': attr.label_list(allow_files=True, doc="Add these data files to the executable's runfiles")}, executable=True, doc='Creates a new target for the given executable with additional runfiles.') |
""" """
def words_to_snake_case(s):
    """Convert a space-separated word string to snake_case.

    Splits on single spaces only, so a run of N spaces becomes N
    underscores (mirroring the original behavior).
    """
    return '_'.join(map(str.lower, s.split(' ')))
| """ """
def words_to_snake_case(s):
components = s.split(' ')
return '_'.join((x.lower() for x in components)) |
# What will this script produce?
# A: 3
# Each assignment rebinds `a`; only the last value (3) survives to the print.
a = 1
a = 2
a = 3
print(a)
| a = 1
a = 2
a = 3
print(a) |
#addintersert3.py
def addInterest(balances, rate):
    """Grow every balance in `balances` in place by the given rate."""
    factor = 1 + rate  # hoisted loop invariant; same value every iteration
    for idx, balance in enumerate(balances):
        balances[idx] = balance * factor
def main():
    """Demo driver: apply 5% interest to a sample list and print the result."""
    deposits = [1000, 105, 3500, 739]
    addInterest(deposits, 0.05)
    print(deposits)
main() | def add_interest(balances, rate):
for i in range(len(balances)):
balances[i] = balances[i] * (1 + rate)
def main():
amounts = [1000, 105, 3500, 739]
rate = 0.05
add_interest(amounts, rate)
print(amounts)
main() |
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should
# not be documented for external users. These will generally be used for internal test or only
# given to customers when they have been briefed on the side effects of using them.
INTERNAL_ONLY_PROPERTIES = {
    # Python object attributes that appear when introspecting the config class.
    "__module__",
    "__doc__",
    "create_transaction",
    # Session-cookie and database secrets -- security sensitive.
    "SESSION_COOKIE_NAME",
    "SESSION_COOKIE_HTTPONLY",
    "SESSION_COOKIE_SAMESITE",
    "DATABASE_SECRET_KEY",
    "V22_NAMESPACE_BLACKLIST",
    "MAXIMUM_CNR_LAYER_SIZE",
    "OCI_NAMESPACE_WHITELIST",
    "FEATURE_GENERAL_OCI_SUPPORT",
    "FEATURE_HELM_OCI_SUPPORT",
    "FEATURE_NAMESPACE_GARBAGE_COLLECTION",
    "FEATURE_REPOSITORY_GARBAGE_COLLECTION",
    "FEATURE_REPOSITORY_ACTION_COUNTER",
    "APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST",
    "APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST",
    "FEATURE_MANIFEST_SIZE_BACKFILL",
    "TESTING",
    "SEND_FILE_MAX_AGE_DEFAULT",
    "DISABLED_FOR_AUDIT_LOGS",
    "DISABLED_FOR_PULL_LOGS",
    "FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES",
    "FEATURE_CLEAR_EXPIRED_RAC_ENTRIES",
    "ACTION_LOG_MAX_PAGE",
    "NON_RATE_LIMITED_NAMESPACES",
    # Internal work-queue names.
    "REPLICATION_QUEUE_NAME",
    "DOCKERFILE_BUILD_QUEUE_NAME",
    "CHUNK_CLEANUP_QUEUE_NAME",
    "SECURITY_SCANNER_ISSUER_NAME",
    "NOTIFICATION_QUEUE_NAME",
    "REPOSITORY_GC_QUEUE_NAME",
    "NAMESPACE_GC_QUEUE_NAME",
    "EXPORT_ACTION_LOGS_QUEUE_NAME",
    "SECSCAN_V4_NOTIFICATION_QUEUE_NAME",
    "FEATURE_BILLING",
    "BILLING_TYPE",
    # Instance service-key lifecycle tuning.
    "INSTANCE_SERVICE_KEY_LOCATION",
    "INSTANCE_SERVICE_KEY_REFRESH",
    "INSTANCE_SERVICE_KEY_SERVICE",
    "INSTANCE_SERVICE_KEY_KID_LOCATION",
    "INSTANCE_SERVICE_KEY_EXPIRATION",
    "UNAPPROVED_SERVICE_KEY_TTL_SEC",
    "EXPIRED_SERVICE_KEY_TTL_SEC",
    "REGISTRY_JWT_AUTH_MAX_FRESH_S",
    "SERVICE_LOG_ACCOUNT_ID",
    "BUILDLOGS_OPTIONS",
    "LIBRARY_NAMESPACE",
    "STAGGER_WORKERS",
    "QUEUE_WORKER_METRICS_REFRESH_SECONDS",
    "PUSH_TEMP_TAG_EXPIRATION_SEC",
    "GARBAGE_COLLECTION_FREQUENCY",
    "PAGE_TOKEN_KEY",
    "BUILD_MANAGER",
    "JWTPROXY_AUDIENCE",
    "JWTPROXY_SIGNER",
    "SECURITY_SCANNER_INDEXING_MIN_ID",
    "SECURITY_SCANNER_V4_REINDEX_THRESHOLD",
    "STATIC_SITE_BUCKET",
    "LABEL_KEY_RESERVED_PREFIXES",
    "TEAM_SYNC_WORKER_FREQUENCY",
    "JSONIFY_PRETTYPRINT_REGULAR",
    "TUF_GUN_PREFIX",
    "LOGGING_LEVEL",
    "SIGNED_GRANT_EXPIRATION_SEC",
    "PROMETHEUS_PUSHGATEWAY_URL",
    "DB_TRANSACTION_FACTORY",
    "NOTIFICATION_SEND_TIMEOUT",
    "QUEUE_METRICS_TYPE",
    "MAIL_FAIL_SILENTLY",
    "LOCAL_OAUTH_HANDLER",
    "USE_CDN",
    "ANALYTICS_TYPE",
    "LAST_ACCESSED_UPDATE_THRESHOLD_S",
    "GREENLET_TRACING",
    "EXCEPTION_LOG_TYPE",
    "SENTRY_DSN",
    "SENTRY_PUBLIC_DSN",
    "BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT",
    "THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT",
    "IP_DATA_API_KEY",
    # Security-scanner tuning knobs.
    "SECURITY_SCANNER_ENDPOINT_BATCH",
    "SECURITY_SCANNER_API_TIMEOUT_SECONDS",
    "SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS",
    "SECURITY_SCANNER_ENGINE_VERSION_TARGET",
    "SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS",
    "SECURITY_SCANNER_API_VERSION",
    "REPO_MIRROR_INTERVAL",
    "DATA_MODEL_CACHE_CONFIG",
    # TODO: move this into the schema once we support signing in QE.
    "FEATURE_SIGNING",
    "TUF_SERVER",
    "V1_ONLY_DOMAIN",
    "LOGS_MODEL",
    "LOGS_MODEL_CONFIG",
    "APP_REGISTRY_RESULTS_LIMIT",
    "V3_UPGRADE_MODE", # Deprecated old flag
    "ACCOUNT_RECOVERY_MODE",
}
CONFIG_SCHEMA = {
"type": "object",
"description": "Schema for Quay configuration",
"required": [
"PREFERRED_URL_SCHEME",
"SERVER_HOSTNAME",
"DB_URI",
"AUTHENTICATION_TYPE",
"DISTRIBUTED_STORAGE_CONFIG",
"BUILDLOGS_REDIS",
"USER_EVENTS_REDIS",
"DISTRIBUTED_STORAGE_PREFERENCE",
"DEFAULT_TAG_EXPIRATION",
"TAG_EXPIRATION_OPTIONS",
],
"properties": {
"REGISTRY_STATE": {
"type": "string",
"description": "The state of the registry.",
"enum": ["normal", "readonly"],
"x-example": "readonly",
},
# Hosting.
"PREFERRED_URL_SCHEME": {
"type": "string",
"description": "The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`",
"enum": ["http", "https"],
"x-example": "https",
},
"SERVER_HOSTNAME": {
"type": "string",
"description": "The URL at which Quay is accessible, without the scheme.",
"x-example": "quay.io",
},
"EXTERNAL_TLS_TERMINATION": {
"type": "boolean",
"description": "If TLS is supported, but terminated at a layer before Quay, must be true.",
"x-example": True,
},
# SSL/TLS.
"SSL_CIPHERS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL ciphers to enabled and disabled",
"x-example": ["CAMELLIA", "!3DES"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers",
},
"SSL_PROTOCOLS": {
"type": "array",
"description": "If specified, the nginx-defined list of SSL protocols to enabled and disabled",
"x-example": ["TLSv1.1", "TLSv1.2"],
"x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols",
},
# User-visible configuration.
"REGISTRY_TITLE": {
"type": "string",
"description": "If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "Corp Container Service",
},
"REGISTRY_TITLE_SHORT": {
"type": "string",
"description": "If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.",
"x-example": "CCS",
},
"CONTACT_INFO": {
"type": "array",
"uniqueItems": True,
"description": "If specified, contact information to display on the contact page. "
+ "If only a single piece of contact information is specified, the contact footer will link directly.",
"items": [
{
"type": "string",
"pattern": "^mailto:(.)+$",
"x-example": "mailto:admin@example.com",
"description": "Adds a link to send an e-mail",
},
{
"type": "string",
"pattern": "^irc://(.)+$",
"x-example": "irc://chat.freenode.net:6665/quay",
"description": "Adds a link to visit an IRC chat room",
},
{
"type": "string",
"pattern": "^tel:(.)+$",
"x-example": "tel:+1-888-930-3475",
"description": "Adds a link to call a phone number",
},
{
"type": "string",
"pattern": "^http(s)?://(.)+$",
"x-example": "https://twitter.com/quayio",
"description": "Adds a link to a defined URL",
},
],
},
"SEARCH_RESULTS_PER_PAGE": {
"type": "number",
"description": "Number of results returned per page by search page. Defaults to 10",
"x-example": 10,
},
"SEARCH_MAX_RESULT_PAGE_COUNT": {
"type": "number",
"description": "Maximum number of pages the user can paginate in search before they are limited. Defaults to 10",
"x-example": 10,
},
# E-mail.
"FEATURE_MAILING": {
"type": "boolean",
"description": "Whether emails are enabled. Defaults to True",
"x-example": True,
},
"MAIL_SERVER": {
"type": "string",
"description": "The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.",
"x-example": "smtp.somedomain.com",
},
"MAIL_USE_TLS": {
"type": "boolean",
"description": "If specified, whether to use TLS for sending e-mails.",
"x-example": True,
},
"MAIL_PORT": {
"type": "number",
"description": "The SMTP port to use. If not specified, defaults to 587.",
"x-example": 588,
},
"MAIL_USERNAME": {
"type": ["string", "null"],
"description": "The SMTP username to use when sending e-mails.",
"x-example": "myuser",
},
"MAIL_PASSWORD": {
"type": ["string", "null"],
"description": "The SMTP password to use when sending e-mails.",
"x-example": "mypassword",
},
"MAIL_DEFAULT_SENDER": {
"type": ["string", "null"],
"description": "If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `admin@example.com`.",
"x-example": "support@myco.com",
},
# Database.
"DB_URI": {
"type": "string",
"description": "The URI at which to access the database, including any credentials.",
"x-example": "mysql+pymysql://username:password@dns.of.database/quay",
"x-reference": "https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495",
},
"DB_CONNECTION_ARGS": {
"type": "object",
"description": "If specified, connection arguments for the database such as timeouts and SSL.",
"properties": {
"threadlocals": {
"type": "boolean",
"description": "Whether to use thread-local connections. Should *ALWAYS* be `true`",
},
"autorollback": {
"type": "boolean",
"description": "Whether to use auto-rollback connections. Should *ALWAYS* be `true`",
},
"ssl": {
"type": "object",
"description": "SSL connection configuration",
"properties": {
"ca": {
"type": "string",
"description": "*Absolute container path* to the CA certificate to use for SSL connections",
"x-example": "conf/stack/ssl-ca-cert.pem",
},
},
"required": ["ca"],
},
},
"required": ["threadlocals", "autorollback"],
},
"ALLOW_PULLS_WITHOUT_STRICT_LOGGING": {
"type": "boolean",
"description": "If true, pulls in which the pull audit log entry cannot be written will "
+ "still succeed. Useful if the database can fallback into a read-only state "
+ "and it is desired for pulls to continue during that time. Defaults to False.",
"x-example": True,
},
# Storage.
"FEATURE_STORAGE_REPLICATION": {
"type": "boolean",
"description": "Whether to automatically replicate between storage engines. Defaults to False",
"x-example": False,
},
"FEATURE_PROXY_STORAGE": {
"type": "boolean",
"description": "Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False",
"x-example": False,
},
"MAXIMUM_LAYER_SIZE": {
"type": "string",
"description": "Maximum allowed size of an image layer. Defaults to 20G",
"x-example": "100G",
"pattern": "^[0-9]+(G|M)$",
},
"DISTRIBUTED_STORAGE_CONFIG": {
"type": "object",
"description": "Configuration for storage engine(s) to use in Quay. Each key is a unique ID"
+ " for a storage engine, with the value being a tuple of the type and "
+ " configuration for that engine.",
"x-example": {
"local_storage": ["LocalStorage", {"storage_path": "some/path/"}],
},
"items": {
"type": "array",
},
},
"DISTRIBUTED_STORAGE_PREFERENCE": {
"type": "array",
"description": "The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to "
+ "use. A preferred engine means it is first checked for pullig and images are "
+ "pushed to it.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS": {
"type": "array",
"description": "The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose "
+ "images should be fully replicated, by default, to all other storage engines.",
"items": {
"type": "string",
"uniqueItems": True,
},
"x-example": ["s3_us_east", "s3_us_west"],
},
"USERFILES_LOCATION": {
"type": "string",
"description": "ID of the storage engine in which to place user-uploaded files",
"x-example": "s3_us_east",
},
"USERFILES_PATH": {
"type": "string",
"description": "Path under storage in which to place user-uploaded files",
"x-example": "userfiles",
},
"ACTION_LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If action log archiving is enabled, the storage engine in which to place the "
+ "archived data.",
"x-example": "s3_us_east",
},
"ACTION_LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If action log archiving is enabled, the path in storage in which to place the "
+ "archived data.",
"x-example": "archives/actionlogs",
},
"ACTION_LOG_ROTATION_THRESHOLD": {
"type": "string",
"description": "If action log archiving is enabled, the time interval after which to "
+ "archive data.",
"x-example": "30d",
},
"LOG_ARCHIVE_LOCATION": {
"type": "string",
"description": "If builds are enabled, the storage engine in which to place the "
+ "archived build logs.",
"x-example": "s3_us_east",
},
"LOG_ARCHIVE_PATH": {
"type": "string",
"description": "If builds are enabled, the path in storage in which to place the "
+ "archived build logs.",
"x-example": "archives/buildlogs",
},
# Authentication.
"AUTHENTICATION_TYPE": {
"type": "string",
"description": "The authentication engine to use for credential authentication.",
"x-example": "Database",
"enum": ["Database", "LDAP", "JWT", "Keystone", "OIDC", "AppToken"],
},
"SUPER_USERS": {
"type": "array",
"description": "Quay usernames of those users to be granted superuser privileges",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"DIRECT_OAUTH_CLIENTID_WHITELIST": {
"type": "array",
"description": "A list of client IDs of *Quay-managed* applications that are allowed "
+ "to perform direct OAuth approval without user approval.",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html",
"uniqueItems": True,
"items": {
"type": "string",
},
},
# Redis.
"BUILDLOGS_REDIS": {
"type": "object",
"description": "Connection information for Redis for build logs caching",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
"USER_EVENTS_REDIS": {
"type": "object",
"description": "Connection information for Redis for user event handling",
"required": ["host"],
"properties": {
"host": {
"type": "string",
"description": "The hostname at which Redis is accessible",
"x-example": "my.redis.cluster",
},
"port": {
"type": "number",
"description": "The port at which Redis is accessible",
"x-example": 1234,
},
"password": {
"type": "string",
"description": "The password to connect to the Redis instance",
"x-example": "mypassword",
},
},
},
# OAuth configuration.
"GITHUB_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) as an external login provider",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-auth.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"ORG_RESTRICT": {
"type": "boolean",
"description": "If true, only users within the organization whitelist can login using this provider",
"x-example": True,
},
"ALLOWED_ORGANIZATIONS": {
"type": "array",
"description": "The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option",
"uniqueItems": True,
"items": {
"type": "string",
},
},
},
},
"BITBUCKET_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using BitBucket for build triggers",
"required": ["CONSUMER_KEY", "CONSUMER_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html",
"properties": {
"CONSUMER_KEY": {
"type": "string",
"description": "The registered consumer key (client ID) for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CONSUMER_SECRET": {
"type": "string",
"description": "The registered consumer secret (client secret) for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITHUB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using GitHub (Enterprise) for build triggers",
"required": ["GITHUB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-build.html",
"properties": {
"GITHUB_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) being hit",
"x-example": "https://github.com/",
},
"API_ENDPOINT": {
"type": "string",
"description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com",
"x-example": "https://api.github.com/",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG",
"x-example": "0e8dbe15c4c7630b6780",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html",
},
},
},
"GOOGLE_LOGIN_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Google for external authentication",
"required": ["CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"GITLAB_TRIGGER_CONFIG": {
"type": ["object", "null"],
"description": "Configuration for using Gitlab (Enterprise) for external authentication",
"required": ["GITLAB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"],
"properties": {
"GITLAB_ENDPOINT": {
"type": "string",
"description": "The endpoint at which Gitlab(Enterprise) is running",
"x-example": "https://gitlab.com",
},
"CLIENT_ID": {
"type": "string",
"description": "The registered client ID for this Quay instance",
"x-example": "0e8dbe15c4c7630b6780",
},
"CLIENT_SECRET": {
"type": "string",
"description": "The registered client secret for this Quay instance",
"x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846",
},
},
},
"BRANDING": {
"type": ["object", "null"],
"description": "Custom branding for logos and URLs in the Quay UI",
"required": ["logo"],
"properties": {
"logo": {
"type": "string",
"description": "Main logo image URL",
"x-example": "/static/img/quay-horizontal-color.svg",
},
"footer_img": {
"type": "string",
"description": "Logo for UI footer",
"x-example": "/static/img/RedHat.svg",
},
"footer_url": {
"type": "string",
"description": "Link for footer image",
"x-example": "https://redhat.com",
},
},
},
"DOCUMENTATION_ROOT": {"type": "string", "description": "Root URL for documentation links"},
# Health.
"HEALTH_CHECKER": {
"description": "The configured health check.",
"x-example": ("RDSAwareHealthCheck", {"access_key": "foo", "secret_key": "bar"}),
},
# Metrics.
"PROMETHEUS_NAMESPACE": {
"type": "string",
"description": "The prefix applied to all exposed Prometheus metrics. Defaults to `quay`",
"x-example": "myregistry",
},
# Misc configuration.
"BLACKLIST_V2_SPEC": {
"type": "string",
"description": "The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`",
"x-reference": "http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec",
"x-example": "<1.8.0",
},
"USER_RECOVERY_TOKEN_LIFETIME": {
"type": "string",
"description": "The length of time a token for recovering a user accounts is valid. Defaults to 30m.",
"x-example": "10m",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"SESSION_COOKIE_SECURE": {
"type": "boolean",
"description": "Whether the `secure` property should be set on session cookies. "
+ "Defaults to False. Recommended to be True for all installations using SSL.",
"x-example": True,
"x-reference": "https://en.wikipedia.org/wiki/Secure_cookies",
},
"PUBLIC_NAMESPACES": {
"type": "array",
"description": "If a namespace is defined in the public namespace list, then it will appear on *all*"
+ " user's repository list pages, regardless of whether that user is a member of the namespace."
+ ' Typically, this is used by an enterprise customer in configuring a set of "well-known"'
+ " namespaces.",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"AVATAR_KIND": {
"type": "string",
"description": "The types of avatars to display, either generated inline (local) or Gravatar (gravatar)",
"enum": ["local", "gravatar"],
},
"V2_PAGINATION_SIZE": {
"type": "number",
"description": "The number of results returned per page in V2 registry APIs",
"x-example": 100,
},
"ENABLE_HEALTH_DEBUG_SECRET": {
"type": ["string", "null"],
"description": "If specified, a secret that can be given to health endpoints to see full debug info when"
+ "not authenticated as a superuser",
"x-example": "somesecrethere",
},
"BROWSER_API_CALLS_XHR_ONLY": {
"type": "boolean",
"description": "If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.",
"x-example": False,
},
# Time machine and tag expiration settings.
"FEATURE_CHANGE_TAG_EXPIRATION": {
"type": "boolean",
"description": "Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.",
"x-example": False,
},
"DEFAULT_TAG_EXPIRATION": {
"type": "string",
"description": "The default, configurable tag expiration time for time machine. Defaults to `2w`.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"TAG_EXPIRATION_OPTIONS": {
"type": "array",
"description": "The options that users can select for expiration of tags in their namespace (if enabled)",
"items": {
"type": "string",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
},
# Team syncing.
"FEATURE_TEAM_SYNCING": {
"type": "boolean",
"description": "Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)",
"x-example": True,
},
"TEAM_RESYNC_STALE_TIME": {
"type": "string",
"description": "If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)",
"x-example": "2h",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP": {
"type": "boolean",
"description": "If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.",
"x-example": True,
},
# Security scanning.
"FEATURE_SECURITY_SCANNER": {
"type": "boolean",
"description": "Whether to turn of/off the security scanner. Defaults to False",
"x-example": False,
"x-reference": "https://coreos.com/quay-enterprise/docs/latest/security-scanning.html",
},
"FEATURE_SECURITY_NOTIFICATIONS": {
"type": "boolean",
"description": "If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False",
"x-example": False,
},
"SECURITY_SCANNER_ENDPOINT": {
"type": "string",
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V2 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_V4_ENDPOINT": {
"type": ["string", "null"],
"pattern": "^http(s)?://(.)+$",
"description": "The endpoint for the V4 security scanner",
"x-example": "http://192.168.99.101:6060",
},
"SECURITY_SCANNER_INDEXING_INTERVAL": {
"type": "number",
"description": "The number of seconds between indexing intervals in the security scanner. Defaults to 30.",
"x-example": 30,
},
"SECURITY_SCANNER_V4_PSK": {
"type": "string",
"description": "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.",
"x-example": "PSK",
},
# Repository mirroring
"REPO_MIRROR_INTERVAL": {
"type": "number",
"description": "The number of seconds between checking for repository mirror candidates. Defaults to 30.",
"x-example": 30,
},
# Build
"FEATURE_GITHUB_BUILD": {
"type": "boolean",
"description": "Whether to support GitHub build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BITBUCKET_BUILD": {
"type": "boolean",
"description": "Whether to support Bitbucket build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_GITLAB_BUILD": {
"type": "boolean",
"description": "Whether to support GitLab build triggers. Defaults to False",
"x-example": False,
},
"FEATURE_BUILD_SUPPORT": {
"type": "boolean",
"description": "Whether to support Dockerfile build. Defaults to True",
"x-example": True,
},
"DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT": {
"type": ["number", "null"],
"description": "If not None, the default maximum number of builds that can be queued in a namespace.",
"x-example": 20,
},
"SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.",
"x-example": 10,
},
"SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD": {
"type": ["number", "null"],
"description": "If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.",
"x-example": 50,
},
# Nested repository names
"FEATURE_EXTENDED_REPOSITORY_NAMES": {
"type": "boolean",
"description": "Whether repository names can have nested paths (/)",
"x-example": False,
},
# Login
"FEATURE_GITHUB_LOGIN": {
"type": "boolean",
"description": "Whether GitHub login is supported. Defaults to False",
"x-example": False,
},
"FEATURE_GOOGLE_LOGIN": {
"type": "boolean",
"description": "Whether Google login is supported. Defaults to False",
"x-example": False,
},
# Recaptcha
"FEATURE_RECAPTCHA": {
"type": "boolean",
"description": "Whether Recaptcha is necessary for user login and recovery. Defaults to False",
"x-example": False,
"x-reference": "https://www.google.com/recaptcha/intro/",
},
"RECAPTCHA_SITE_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the site key for the Recaptcha service",
},
"RECAPTCHA_SECRET_KEY": {
"type": ["string", "null"],
"description": "If recaptcha is enabled, the secret key for the Recaptcha service",
},
# External application tokens.
"FEATURE_APP_SPECIFIC_TOKENS": {
"type": "boolean",
"description": "If enabled, users can create tokens for use by the Docker CLI. Defaults to True",
"x-example": False,
},
"APP_SPECIFIC_TOKEN_EXPIRATION": {
"type": ["string", "null"],
"description": "The expiration for external app tokens. Defaults to None.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
"EXPIRED_APP_SPECIFIC_TOKEN_GC": {
"type": ["string", "null"],
"description": "Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.",
"pattern": "^[0-9]+(w|m|d|h|s)$",
},
# Feature Flag: Garbage collection.
"FEATURE_GARBAGE_COLLECTION": {
"type": "boolean",
"description": "Whether garbage collection of repositories is enabled. Defaults to True",
"x-example": False,
},
# Feature Flag: Rate limits.
"FEATURE_RATE_LIMITS": {
"type": "boolean",
"description": "Whether to enable rate limits on API and registry endpoints. Defaults to False",
"x-example": True,
},
# Feature Flag: Aggregated log retrieval.
"FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL": {
"type": "boolean",
"description": "Whether to allow retrieval of aggregated log counts. Defaults to True",
"x-example": True,
},
# Feature Flag: Log export.
"FEATURE_LOG_EXPORT": {
"type": "boolean",
"description": "Whether to allow exporting of action logs. Defaults to True",
"x-example": True,
},
# Feature Flag: User last accessed.
"FEATURE_USER_LAST_ACCESSED": {
"type": "boolean",
"description": "Whether to record the last time a user was accessed. Defaults to True",
"x-example": True,
},
# Feature Flag: Permanent Sessions.
"FEATURE_PERMANENT_SESSIONS": {
"type": "boolean",
"description": "Whether sessions are permanent. Defaults to True",
"x-example": True,
},
# Feature Flag: Super User Support.
"FEATURE_SUPER_USERS": {
"type": "boolean",
"description": "Whether super users are supported. Defaults to True",
"x-example": True,
},
# Feature Flag: Use FIPS compliant cryptography.
"FEATURE_FIPS": {
"type": "boolean",
"description": "If set to true, Quay will run using FIPS compliant hash functions. Defaults to False",
"x-example": True,
},
# Feature Flag: Anonymous Users.
"FEATURE_ANONYMOUS_ACCESS": {
"type": "boolean",
"description": " Whether to allow anonymous users to browse and pull public repositories. Defaults to True",
"x-example": True,
},
# Feature Flag: User Creation.
"FEATURE_USER_CREATION": {
"type": "boolean",
"description": "Whether users can be created (by non-super users). Defaults to True",
"x-example": True,
},
# Feature Flag: Invite Only User Creation.
"FEATURE_INVITE_ONLY_USER_CREATION": {
"type": "boolean",
"description": "Whether users being created must be invited by another user. Defaults to False",
"x-example": False,
},
# Feature Flag: Encrypted Basic Auth.
"FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH": {
"type": "boolean",
"description": "Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False",
"x-example": False,
},
# Feature Flag: Direct Login.
"FEATURE_DIRECT_LOGIN": {
"type": "boolean",
"description": "Whether users can directly login to the UI. Defaults to True",
"x-example": True,
},
# Feature Flag: Advertising V2.
"FEATURE_ADVERTISE_V2": {
"type": "boolean",
"description": "Whether the v2/ endpoint is visible. Defaults to True",
"x-example": True,
},
# Feature Flag: Log Rotation.
"FEATURE_ACTION_LOG_ROTATION": {
"type": "boolean",
"description": "Whether or not to rotate old action logs to storage. Defaults to False",
"x-example": False,
},
# Feature Flag: ACI Conversion.
"FEATURE_ACI_CONVERSION": {
"type": "boolean",
"description": "Whether to enable conversion to ACIs. Defaults to False",
"x-example": False,
},
# Feature Flag: Library Support.
"FEATURE_LIBRARY_SUPPORT": {
"type": "boolean",
"description": 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True',
"x-example": True,
},
# Feature Flag: Require Team Invite.
"FEATURE_REQUIRE_TEAM_INVITE": {
"type": "boolean",
"description": "Whether to require invitations when adding a user to a team. Defaults to True",
"x-example": True,
},
# Feature Flag: Collecting and Supporting Metadata.
"FEATURE_USER_METADATA": {
"type": "boolean",
"description": "Whether to collect and support user metadata. Defaults to False",
"x-example": False,
},
# Feature Flag: Support App Registry.
"FEATURE_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to enable support for App repositories. Defaults to False",
"x-example": False,
},
# Feature Flag: Read only app registry.
"FEATURE_READONLY_APP_REGISTRY": {
"type": "boolean",
"description": "Whether to App repositories are read-only. Defaults to False",
"x-example": True,
},
    # Feature Flag: Public Repositories in _catalog Endpoint.
"FEATURE_PUBLIC_CATALOG": {
"type": "boolean",
"description": "If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False",
"x-example": False,
},
# Feature Flag: Reader Build Logs.
"FEATURE_READER_BUILD_LOGS": {
"type": "boolean",
"description": "If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False",
"x-example": False,
},
# Feature Flag: Usernames Autocomplete.
"FEATURE_PARTIAL_USER_AUTOCOMPLETE": {
"type": "boolean",
"description": "If set to true, autocompletion will apply to partial usernames. Defaults to True",
"x-example": True,
},
# Feature Flag: User log access.
"FEATURE_USER_LOG_ACCESS": {
"type": "boolean",
"description": "If set to true, users will have access to audit logs for their namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: User renaming.
"FEATURE_USER_RENAME": {
"type": "boolean",
"description": "If set to true, users can rename their own namespace. Defaults to False",
"x-example": True,
},
# Feature Flag: Username confirmation.
"FEATURE_USERNAME_CONFIRMATION": {
"type": "boolean",
"description": "If set to true, users can confirm their generated usernames. Defaults to True",
"x-example": False,
},
# Feature Flag: V1 push restriction.
"FEATURE_RESTRICTED_V1_PUSH": {
"type": "boolean",
"description": "If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. Defaults to True",
"x-example": False,
},
# Feature Flag: Support Repository Mirroring.
"FEATURE_REPO_MIRROR": {
"type": "boolean",
"description": "Whether to enable support for repository mirroring. Defaults to False",
"x-example": False,
},
"REPO_MIRROR_TLS_VERIFY": {
"type": "boolean",
"description": "Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True",
"x-example": True,
},
"REPO_MIRROR_SERVER_HOSTNAME": {
"type": ["string", "null"],
"description": "Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset",
"x-example": "openshift-quay-service",
},
# Feature Flag: V1 push restriction.
"V1_PUSH_WHITELIST": {
"type": "array",
"description": "The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.",
"x-example": ["some", "namespaces"],
},
# Logs model
"LOGS_MODEL": {
"type": "string",
"description": "Logs model for action logs",
"enum": ["database", "transition_reads_both_writes_es", "elasticsearch"],
"x-example": "database",
},
"LOGS_MODEL_CONFIG": {
"type": "object",
"description": "Logs model config for action logs",
"x-reference": "https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html",
"properties": {
"producer": {
"type": "string",
"description": "Logs producer if logging to Elasticsearch",
"enum": ["kafka", "elasticsearch", "kinesis_stream"],
"x-example": "kafka",
},
"elasticsearch_config": {
"type": "object",
"description": "Elasticsearch cluster configuration",
"properties": {
"host": {
"type": "string",
"description": "Elasticsearch cluster endpoint",
"x-example": "host.elasticsearch.example",
},
"port": {
"type": "number",
"description": "Elasticsearch cluster endpoint port",
"x-example": 1234,
},
"access_key": {
"type": "string",
"description": "Elasticsearch user (or IAM key for AWS ES)",
"x-example": "some_string",
},
"secret_key": {
"type": "string",
"description": "Elasticsearch password (or IAM secret for AWS ES)",
"x-example": "some_secret_string",
},
"aws_region": {
"type": "string",
"description": "Amazon web service region",
"x-example": "us-east-1",
},
"use_ssl": {
"type": "boolean",
"description": "Use ssl for Elasticsearch. Defaults to True",
"x-example": True,
},
"index_prefix": {
"type": "string",
"description": "Elasticsearch's index prefix",
"x-example": "logentry_",
},
"index_settings": {
"type": "object",
"description": "Elasticsearch's index settings",
},
},
},
"kafka_config": {
"type": "object",
"description": "Kafka cluster configuration",
"properties": {
"bootstrap_servers": {
"type": "array",
"description": "List of Kafka brokers to bootstrap the client from",
"uniqueItems": True,
"items": {
"type": "string",
},
},
"topic": {
"type": "string",
"description": "Kafka topic to publish log entries to",
"x-example": "logentry",
},
"max_block_seconds": {
"type": "number",
"description": "Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable",
"x-example": 10,
},
},
},
"kinesis_stream_config": {
"type": "object",
"description": "AWS Kinesis Stream configuration",
"properties": {
"stream_name": {
"type": "string",
"description": "Kinesis stream to send action logs to",
"x-example": "logentry-kinesis-stream",
},
"aws_region": {
"type": "string",
"description": "AWS region",
"x-example": "us-east-1",
},
"aws_access_key": {
"type": "string",
"description": "AWS access key",
"x-example": "some_access_key",
},
"aws_secret_key": {
"type": "string",
"description": "AWS secret key",
"x-example": "some_secret_key",
},
"connect_timeout": {
"type": "number",
"description": "Number of seconds before timeout when attempting to make a connection",
"x-example": 5,
},
"read_timeout": {
"type": "number",
"description": "Number of seconds before timeout when reading from a connection",
"x-example": 5,
},
"retries": {
"type": "number",
"description": "Max number of attempts made on a single request",
"x-example": 5,
},
"max_pool_connections": {
"type": "number",
"description": "The maximum number of connections to keep in a connection pool",
"x-example": 10,
},
},
},
},
},
# Feature Flag: Blacklist Email Domains
"FEATURE_BLACKLISTED_EMAILS": {
"type": "boolean",
"description": "If set to true, no new User accounts may be created if their email domain is blacklisted.",
"x-example": False,
},
# Blacklisted Email Domains
"BLACKLISTED_EMAIL_DOMAINS": {
"type": "array",
"description": "The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.",
"x-example": ["example.com", "example.org"],
},
"FRESH_LOGIN_TIMEOUT": {
"type": "string",
"description": "The time after which a fresh login requires users to reenter their password",
"x-example": "5m",
},
# Webhook blacklist.
"WEBHOOK_HOSTNAME_BLACKLIST": {
"type": "array",
"description": "The set of hostnames to disallow from webhooks when validating, beyond localhost",
"x-example": ["somexternaldomain.com"],
},
"CREATE_PRIVATE_REPO_ON_PUSH": {
"type": "boolean",
"description": "Whether new repositories created by push are set to private visibility. Defaults to True.",
"x-example": True,
},
"CREATE_NAMESPACE_ON_PUSH": {
"type": "boolean",
"description": "Whether new push to a non-existent organization creates it. Defaults to False.",
"x-example": False,
},
# Allow first user to be initialized via API
"FEATURE_USER_INITIALIZE": {
"type": "boolean",
"description": "If set to true, the first User account may be created via API /api/v1/user/initialize",
"x-example": False,
},
# OCI artifact types
"ALLOWED_OCI_ARTIFACT_TYPES": {
"type": "object",
"description": "The set of allowed OCI artifact mimetypes and the assiciated layer types",
"x-example": {
"application/vnd.cncf.helm.config.v1+json": ["application/tar+gzip"],
"application/vnd.sylabs.sif.config.v1+json": [
"application/vnd.sylabs.sif.layer.v1.sif"
],
},
},
},
}
| internal_only_properties = {'__module__', '__doc__', 'create_transaction', 'SESSION_COOKIE_NAME', 'SESSION_COOKIE_HTTPONLY', 'SESSION_COOKIE_SAMESITE', 'DATABASE_SECRET_KEY', 'V22_NAMESPACE_BLACKLIST', 'MAXIMUM_CNR_LAYER_SIZE', 'OCI_NAMESPACE_WHITELIST', 'FEATURE_GENERAL_OCI_SUPPORT', 'FEATURE_HELM_OCI_SUPPORT', 'FEATURE_NAMESPACE_GARBAGE_COLLECTION', 'FEATURE_REPOSITORY_GARBAGE_COLLECTION', 'FEATURE_REPOSITORY_ACTION_COUNTER', 'APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST', 'APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST', 'FEATURE_MANIFEST_SIZE_BACKFILL', 'TESTING', 'SEND_FILE_MAX_AGE_DEFAULT', 'DISABLED_FOR_AUDIT_LOGS', 'DISABLED_FOR_PULL_LOGS', 'FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES', 'FEATURE_CLEAR_EXPIRED_RAC_ENTRIES', 'ACTION_LOG_MAX_PAGE', 'NON_RATE_LIMITED_NAMESPACES', 'REPLICATION_QUEUE_NAME', 'DOCKERFILE_BUILD_QUEUE_NAME', 'CHUNK_CLEANUP_QUEUE_NAME', 'SECURITY_SCANNER_ISSUER_NAME', 'NOTIFICATION_QUEUE_NAME', 'REPOSITORY_GC_QUEUE_NAME', 'NAMESPACE_GC_QUEUE_NAME', 'EXPORT_ACTION_LOGS_QUEUE_NAME', 'SECSCAN_V4_NOTIFICATION_QUEUE_NAME', 'FEATURE_BILLING', 'BILLING_TYPE', 'INSTANCE_SERVICE_KEY_LOCATION', 'INSTANCE_SERVICE_KEY_REFRESH', 'INSTANCE_SERVICE_KEY_SERVICE', 'INSTANCE_SERVICE_KEY_KID_LOCATION', 'INSTANCE_SERVICE_KEY_EXPIRATION', 'UNAPPROVED_SERVICE_KEY_TTL_SEC', 'EXPIRED_SERVICE_KEY_TTL_SEC', 'REGISTRY_JWT_AUTH_MAX_FRESH_S', 'SERVICE_LOG_ACCOUNT_ID', 'BUILDLOGS_OPTIONS', 'LIBRARY_NAMESPACE', 'STAGGER_WORKERS', 'QUEUE_WORKER_METRICS_REFRESH_SECONDS', 'PUSH_TEMP_TAG_EXPIRATION_SEC', 'GARBAGE_COLLECTION_FREQUENCY', 'PAGE_TOKEN_KEY', 'BUILD_MANAGER', 'JWTPROXY_AUDIENCE', 'JWTPROXY_SIGNER', 'SECURITY_SCANNER_INDEXING_MIN_ID', 'SECURITY_SCANNER_V4_REINDEX_THRESHOLD', 'STATIC_SITE_BUCKET', 'LABEL_KEY_RESERVED_PREFIXES', 'TEAM_SYNC_WORKER_FREQUENCY', 'JSONIFY_PRETTYPRINT_REGULAR', 'TUF_GUN_PREFIX', 'LOGGING_LEVEL', 'SIGNED_GRANT_EXPIRATION_SEC', 'PROMETHEUS_PUSHGATEWAY_URL', 'DB_TRANSACTION_FACTORY', 'NOTIFICATION_SEND_TIMEOUT', 'QUEUE_METRICS_TYPE', 
'MAIL_FAIL_SILENTLY', 'LOCAL_OAUTH_HANDLER', 'USE_CDN', 'ANALYTICS_TYPE', 'LAST_ACCESSED_UPDATE_THRESHOLD_S', 'GREENLET_TRACING', 'EXCEPTION_LOG_TYPE', 'SENTRY_DSN', 'SENTRY_PUBLIC_DSN', 'BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT', 'THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT', 'IP_DATA_API_KEY', 'SECURITY_SCANNER_ENDPOINT_BATCH', 'SECURITY_SCANNER_API_TIMEOUT_SECONDS', 'SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS', 'SECURITY_SCANNER_ENGINE_VERSION_TARGET', 'SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS', 'SECURITY_SCANNER_API_VERSION', 'REPO_MIRROR_INTERVAL', 'DATA_MODEL_CACHE_CONFIG', 'FEATURE_SIGNING', 'TUF_SERVER', 'V1_ONLY_DOMAIN', 'LOGS_MODEL', 'LOGS_MODEL_CONFIG', 'APP_REGISTRY_RESULTS_LIMIT', 'V3_UPGRADE_MODE', 'ACCOUNT_RECOVERY_MODE'}
config_schema = {'type': 'object', 'description': 'Schema for Quay configuration', 'required': ['PREFERRED_URL_SCHEME', 'SERVER_HOSTNAME', 'DB_URI', 'AUTHENTICATION_TYPE', 'DISTRIBUTED_STORAGE_CONFIG', 'BUILDLOGS_REDIS', 'USER_EVENTS_REDIS', 'DISTRIBUTED_STORAGE_PREFERENCE', 'DEFAULT_TAG_EXPIRATION', 'TAG_EXPIRATION_OPTIONS'], 'properties': {'REGISTRY_STATE': {'type': 'string', 'description': 'The state of the registry.', 'enum': ['normal', 'readonly'], 'x-example': 'readonly'}, 'PREFERRED_URL_SCHEME': {'type': 'string', 'description': 'The URL scheme to use when hitting Quay. If Quay is behind SSL *at all*, this *must* be `https`', 'enum': ['http', 'https'], 'x-example': 'https'}, 'SERVER_HOSTNAME': {'type': 'string', 'description': 'The URL at which Quay is accessible, without the scheme.', 'x-example': 'quay.io'}, 'EXTERNAL_TLS_TERMINATION': {'type': 'boolean', 'description': 'If TLS is supported, but terminated at a layer before Quay, must be true.', 'x-example': True}, 'SSL_CIPHERS': {'type': 'array', 'description': 'If specified, the nginx-defined list of SSL ciphers to enabled and disabled', 'x-example': ['CAMELLIA', '!3DES'], 'x-reference': 'http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers'}, 'SSL_PROTOCOLS': {'type': 'array', 'description': 'If specified, the nginx-defined list of SSL protocols to enabled and disabled', 'x-example': ['TLSv1.1', 'TLSv1.2'], 'x-reference': 'http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols'}, 'REGISTRY_TITLE': {'type': 'string', 'description': 'If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.', 'x-example': 'Corp Container Service'}, 'REGISTRY_TITLE_SHORT': {'type': 'string', 'description': 'If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.', 'x-example': 'CCS'}, 'CONTACT_INFO': {'type': 'array', 'uniqueItems': True, 'description': 'If specified, contact information to display on the contact page. 
' + 'If only a single piece of contact information is specified, the contact footer will link directly.', 'items': [{'type': 'string', 'pattern': '^mailto:(.)+$', 'x-example': 'mailto:admin@example.com', 'description': 'Adds a link to send an e-mail'}, {'type': 'string', 'pattern': '^irc://(.)+$', 'x-example': 'irc://chat.freenode.net:6665/quay', 'description': 'Adds a link to visit an IRC chat room'}, {'type': 'string', 'pattern': '^tel:(.)+$', 'x-example': 'tel:+1-888-930-3475', 'description': 'Adds a link to call a phone number'}, {'type': 'string', 'pattern': '^http(s)?://(.)+$', 'x-example': 'https://twitter.com/quayio', 'description': 'Adds a link to a defined URL'}]}, 'SEARCH_RESULTS_PER_PAGE': {'type': 'number', 'description': 'Number of results returned per page by search page. Defaults to 10', 'x-example': 10}, 'SEARCH_MAX_RESULT_PAGE_COUNT': {'type': 'number', 'description': 'Maximum number of pages the user can paginate in search before they are limited. Defaults to 10', 'x-example': 10}, 'FEATURE_MAILING': {'type': 'boolean', 'description': 'Whether emails are enabled. Defaults to True', 'x-example': True}, 'MAIL_SERVER': {'type': 'string', 'description': 'The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.', 'x-example': 'smtp.somedomain.com'}, 'MAIL_USE_TLS': {'type': 'boolean', 'description': 'If specified, whether to use TLS for sending e-mails.', 'x-example': True}, 'MAIL_PORT': {'type': 'number', 'description': 'The SMTP port to use. If not specified, defaults to 587.', 'x-example': 588}, 'MAIL_USERNAME': {'type': ['string', 'null'], 'description': 'The SMTP username to use when sending e-mails.', 'x-example': 'myuser'}, 'MAIL_PASSWORD': {'type': ['string', 'null'], 'description': 'The SMTP password to use when sending e-mails.', 'x-example': 'mypassword'}, 'MAIL_DEFAULT_SENDER': {'type': ['string', 'null'], 'description': 'If specified, the e-mail address used as the `from` when Quay sends e-mails. 
If none, defaults to `admin@example.com`.', 'x-example': 'support@myco.com'}, 'DB_URI': {'type': 'string', 'description': 'The URI at which to access the database, including any credentials.', 'x-example': 'mysql+pymysql://username:password@dns.of.database/quay', 'x-reference': 'https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495'}, 'DB_CONNECTION_ARGS': {'type': 'object', 'description': 'If specified, connection arguments for the database such as timeouts and SSL.', 'properties': {'threadlocals': {'type': 'boolean', 'description': 'Whether to use thread-local connections. Should *ALWAYS* be `true`'}, 'autorollback': {'type': 'boolean', 'description': 'Whether to use auto-rollback connections. Should *ALWAYS* be `true`'}, 'ssl': {'type': 'object', 'description': 'SSL connection configuration', 'properties': {'ca': {'type': 'string', 'description': '*Absolute container path* to the CA certificate to use for SSL connections', 'x-example': 'conf/stack/ssl-ca-cert.pem'}}, 'required': ['ca']}}, 'required': ['threadlocals', 'autorollback']}, 'ALLOW_PULLS_WITHOUT_STRICT_LOGGING': {'type': 'boolean', 'description': 'If true, pulls in which the pull audit log entry cannot be written will ' + 'still succeed. Useful if the database can fallback into a read-only state ' + 'and it is desired for pulls to continue during that time. Defaults to False.', 'x-example': True}, 'FEATURE_STORAGE_REPLICATION': {'type': 'boolean', 'description': 'Whether to automatically replicate between storage engines. Defaults to False', 'x-example': False}, 'FEATURE_PROXY_STORAGE': {'type': 'boolean', 'description': 'Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False', 'x-example': False}, 'MAXIMUM_LAYER_SIZE': {'type': 'string', 'description': 'Maximum allowed size of an image layer. 
Defaults to 20G', 'x-example': '100G', 'pattern': '^[0-9]+(G|M)$'}, 'DISTRIBUTED_STORAGE_CONFIG': {'type': 'object', 'description': 'Configuration for storage engine(s) to use in Quay. Each key is a unique ID' + ' for a storage engine, with the value being a tuple of the type and ' + ' configuration for that engine.', 'x-example': {'local_storage': ['LocalStorage', {'storage_path': 'some/path/'}]}, 'items': {'type': 'array'}}, 'DISTRIBUTED_STORAGE_PREFERENCE': {'type': 'array', 'description': 'The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to ' + 'use. A preferred engine means it is first checked for pullig and images are ' + 'pushed to it.', 'items': {'type': 'string', 'uniqueItems': True}, 'x-example': ['s3_us_east', 's3_us_west']}, 'DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS': {'type': 'array', 'description': 'The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose ' + 'images should be fully replicated, by default, to all other storage engines.', 'items': {'type': 'string', 'uniqueItems': True}, 'x-example': ['s3_us_east', 's3_us_west']}, 'USERFILES_LOCATION': {'type': 'string', 'description': 'ID of the storage engine in which to place user-uploaded files', 'x-example': 's3_us_east'}, 'USERFILES_PATH': {'type': 'string', 'description': 'Path under storage in which to place user-uploaded files', 'x-example': 'userfiles'}, 'ACTION_LOG_ARCHIVE_LOCATION': {'type': 'string', 'description': 'If action log archiving is enabled, the storage engine in which to place the ' + 'archived data.', 'x-example': 's3_us_east'}, 'ACTION_LOG_ARCHIVE_PATH': {'type': 'string', 'description': 'If action log archiving is enabled, the path in storage in which to place the ' + 'archived data.', 'x-example': 'archives/actionlogs'}, 'ACTION_LOG_ROTATION_THRESHOLD': {'type': 'string', 'description': 'If action log archiving is enabled, the time interval after which to ' + 'archive data.', 'x-example': '30d'}, 'LOG_ARCHIVE_LOCATION': {'type': 'string', 
'description': 'If builds are enabled, the storage engine in which to place the ' + 'archived build logs.', 'x-example': 's3_us_east'}, 'LOG_ARCHIVE_PATH': {'type': 'string', 'description': 'If builds are enabled, the path in storage in which to place the ' + 'archived build logs.', 'x-example': 'archives/buildlogs'}, 'AUTHENTICATION_TYPE': {'type': 'string', 'description': 'The authentication engine to use for credential authentication.', 'x-example': 'Database', 'enum': ['Database', 'LDAP', 'JWT', 'Keystone', 'OIDC', 'AppToken']}, 'SUPER_USERS': {'type': 'array', 'description': 'Quay usernames of those users to be granted superuser privileges', 'uniqueItems': True, 'items': {'type': 'string'}}, 'DIRECT_OAUTH_CLIENTID_WHITELIST': {'type': 'array', 'description': 'A list of client IDs of *Quay-managed* applications that are allowed ' + 'to perform direct OAuth approval without user approval.', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html', 'uniqueItems': True, 'items': {'type': 'string'}}, 'BUILDLOGS_REDIS': {'type': 'object', 'description': 'Connection information for Redis for build logs caching', 'required': ['host'], 'properties': {'host': {'type': 'string', 'description': 'The hostname at which Redis is accessible', 'x-example': 'my.redis.cluster'}, 'port': {'type': 'number', 'description': 'The port at which Redis is accessible', 'x-example': 1234}, 'password': {'type': 'string', 'description': 'The password to connect to the Redis instance', 'x-example': 'mypassword'}}}, 'USER_EVENTS_REDIS': {'type': 'object', 'description': 'Connection information for Redis for user event handling', 'required': ['host'], 'properties': {'host': {'type': 'string', 'description': 'The hostname at which Redis is accessible', 'x-example': 'my.redis.cluster'}, 'port': {'type': 'number', 'description': 'The port at which Redis is accessible', 'x-example': 1234}, 'password': {'type': 'string', 'description': 'The password to connect to the Redis 
instance', 'x-example': 'mypassword'}}}, 'GITHUB_LOGIN_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using GitHub (Enterprise) as an external login provider', 'required': ['CLIENT_ID', 'CLIENT_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-auth.html', 'properties': {'GITHUB_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) being hit', 'x-example': 'https://github.com/'}, 'API_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', 'x-example': 'https://api.github.com/'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG', 'x-example': '0e8dbe15c4c7630b6780', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'ORG_RESTRICT': {'type': 'boolean', 'description': 'If true, only users within the organization whitelist can login using this provider', 'x-example': True}, 'ALLOWED_ORGANIZATIONS': {'type': 'array', 'description': 'The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option', 'uniqueItems': True, 'items': {'type': 'string'}}}}, 'BITBUCKET_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using BitBucket for build triggers', 'required': ['CONSUMER_KEY', 'CONSUMER_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html', 'properties': {'CONSUMER_KEY': {'type': 'string', 'description': 'The registered consumer key (client ID) for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CONSUMER_SECRET': {'type': 'string', 
'description': 'The registered consumer secret (client secret) for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'GITHUB_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using GitHub (Enterprise) for build triggers', 'required': ['GITHUB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-build.html', 'properties': {'GITHUB_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) being hit', 'x-example': 'https://github.com/'}, 'API_ENDPOINT': {'type': 'string', 'description': 'The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com', 'x-example': 'https://api.github.com/'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG', 'x-example': '0e8dbe15c4c7630b6780', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846', 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/github-app.html'}}}, 'GOOGLE_LOGIN_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using Google for external authentication', 'required': ['CLIENT_ID', 'CLIENT_SECRET'], 'properties': {'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'GITLAB_TRIGGER_CONFIG': {'type': ['object', 'null'], 'description': 'Configuration for using Gitlab (Enterprise) for external authentication', 'required': ['GITLAB_ENDPOINT', 'CLIENT_ID', 'CLIENT_SECRET'], 'properties': 
{'GITLAB_ENDPOINT': {'type': 'string', 'description': 'The endpoint at which Gitlab(Enterprise) is running', 'x-example': 'https://gitlab.com'}, 'CLIENT_ID': {'type': 'string', 'description': 'The registered client ID for this Quay instance', 'x-example': '0e8dbe15c4c7630b6780'}, 'CLIENT_SECRET': {'type': 'string', 'description': 'The registered client secret for this Quay instance', 'x-example': 'e4a58ddd3d7408b7aec109e85564a0d153d3e846'}}}, 'BRANDING': {'type': ['object', 'null'], 'description': 'Custom branding for logos and URLs in the Quay UI', 'required': ['logo'], 'properties': {'logo': {'type': 'string', 'description': 'Main logo image URL', 'x-example': '/static/img/quay-horizontal-color.svg'}, 'footer_img': {'type': 'string', 'description': 'Logo for UI footer', 'x-example': '/static/img/RedHat.svg'}, 'footer_url': {'type': 'string', 'description': 'Link for footer image', 'x-example': 'https://redhat.com'}}}, 'DOCUMENTATION_ROOT': {'type': 'string', 'description': 'Root URL for documentation links'}, 'HEALTH_CHECKER': {'description': 'The configured health check.', 'x-example': ('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'})}, 'PROMETHEUS_NAMESPACE': {'type': 'string', 'description': 'The prefix applied to all exposed Prometheus metrics. Defaults to `quay`', 'x-example': 'myregistry'}, 'BLACKLIST_V2_SPEC': {'type': 'string', 'description': 'The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`', 'x-reference': 'http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec', 'x-example': '<1.8.0'}, 'USER_RECOVERY_TOKEN_LIFETIME': {'type': 'string', 'description': 'The length of time a token for recovering a user accounts is valid. Defaults to 30m.', 'x-example': '10m', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'SESSION_COOKIE_SECURE': {'type': 'boolean', 'description': 'Whether the `secure` property should be set on session cookies. ' + 'Defaults to False. 
Recommended to be True for all installations using SSL.', 'x-example': True, 'x-reference': 'https://en.wikipedia.org/wiki/Secure_cookies'}, 'PUBLIC_NAMESPACES': {'type': 'array', 'description': 'If a namespace is defined in the public namespace list, then it will appear on *all*' + " user's repository list pages, regardless of whether that user is a member of the namespace." + ' Typically, this is used by an enterprise customer in configuring a set of "well-known"' + ' namespaces.', 'uniqueItems': True, 'items': {'type': 'string'}}, 'AVATAR_KIND': {'type': 'string', 'description': 'The types of avatars to display, either generated inline (local) or Gravatar (gravatar)', 'enum': ['local', 'gravatar']}, 'V2_PAGINATION_SIZE': {'type': 'number', 'description': 'The number of results returned per page in V2 registry APIs', 'x-example': 100}, 'ENABLE_HEALTH_DEBUG_SECRET': {'type': ['string', 'null'], 'description': 'If specified, a secret that can be given to health endpoints to see full debug info when' + 'not authenticated as a superuser', 'x-example': 'somesecrethere'}, 'BROWSER_API_CALLS_XHR_ONLY': {'type': 'boolean', 'description': 'If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.', 'x-example': False}, 'FEATURE_CHANGE_TAG_EXPIRATION': {'type': 'boolean', 'description': 'Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.', 'x-example': False}, 'DEFAULT_TAG_EXPIRATION': {'type': 'string', 'description': 'The default, configurable tag expiration time for time machine. 
Defaults to `2w`.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'TAG_EXPIRATION_OPTIONS': {'type': 'array', 'description': 'The options that users can select for expiration of tags in their namespace (if enabled)', 'items': {'type': 'string', 'pattern': '^[0-9]+(w|m|d|h|s)$'}}, 'FEATURE_TEAM_SYNCING': {'type': 'boolean', 'description': 'Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)', 'x-example': True}, 'TEAM_RESYNC_STALE_TIME': {'type': 'string', 'description': 'If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)', 'x-example': '2h', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP': {'type': 'boolean', 'description': 'If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.', 'x-example': True}, 'FEATURE_SECURITY_SCANNER': {'type': 'boolean', 'description': 'Whether to turn of/off the security scanner. Defaults to False', 'x-example': False, 'x-reference': 'https://coreos.com/quay-enterprise/docs/latest/security-scanning.html'}, 'FEATURE_SECURITY_NOTIFICATIONS': {'type': 'boolean', 'description': 'If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False', 'x-example': False}, 'SECURITY_SCANNER_ENDPOINT': {'type': 'string', 'pattern': '^http(s)?://(.)+$', 'description': 'The endpoint for the V2 security scanner', 'x-example': 'http://192.168.99.101:6060'}, 'SECURITY_SCANNER_V4_ENDPOINT': {'type': ['string', 'null'], 'pattern': '^http(s)?://(.)+$', 'description': 'The endpoint for the V4 security scanner', 'x-example': 'http://192.168.99.101:6060'}, 'SECURITY_SCANNER_INDEXING_INTERVAL': {'type': 'number', 'description': 'The number of seconds between indexing intervals in the security scanner. 
Defaults to 30.', 'x-example': 30}, 'SECURITY_SCANNER_V4_PSK': {'type': 'string', 'description': "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.", 'x-example': 'PSK'}, 'REPO_MIRROR_INTERVAL': {'type': 'number', 'description': 'The number of seconds between checking for repository mirror candidates. Defaults to 30.', 'x-example': 30}, 'FEATURE_GITHUB_BUILD': {'type': 'boolean', 'description': 'Whether to support GitHub build triggers. Defaults to False', 'x-example': False}, 'FEATURE_BITBUCKET_BUILD': {'type': 'boolean', 'description': 'Whether to support Bitbucket build triggers. Defaults to False', 'x-example': False}, 'FEATURE_GITLAB_BUILD': {'type': 'boolean', 'description': 'Whether to support GitLab build triggers. Defaults to False', 'x-example': False}, 'FEATURE_BUILD_SUPPORT': {'type': 'boolean', 'description': 'Whether to support Dockerfile build. Defaults to True', 'x-example': True}, 'DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT': {'type': ['number', 'null'], 'description': 'If not None, the default maximum number of builds that can be queued in a namespace.', 'x-example': 20}, 'SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD': {'type': ['number', 'null'], 'description': 'If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.', 'x-example': 10}, 'SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD': {'type': ['number', 'null'], 'description': 'If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.', 'x-example': 50}, 'FEATURE_EXTENDED_REPOSITORY_NAMES': {'type': 'boolean', 'description': 'Whether repository names can have nested paths (/)', 'x-example': False}, 'FEATURE_GITHUB_LOGIN': {'type': 'boolean', 'description': 'Whether GitHub login is supported. 
Defaults to False', 'x-example': False}, 'FEATURE_GOOGLE_LOGIN': {'type': 'boolean', 'description': 'Whether Google login is supported. Defaults to False', 'x-example': False}, 'FEATURE_RECAPTCHA': {'type': 'boolean', 'description': 'Whether Recaptcha is necessary for user login and recovery. Defaults to False', 'x-example': False, 'x-reference': 'https://www.google.com/recaptcha/intro/'}, 'RECAPTCHA_SITE_KEY': {'type': ['string', 'null'], 'description': 'If recaptcha is enabled, the site key for the Recaptcha service'}, 'RECAPTCHA_SECRET_KEY': {'type': ['string', 'null'], 'description': 'If recaptcha is enabled, the secret key for the Recaptcha service'}, 'FEATURE_APP_SPECIFIC_TOKENS': {'type': 'boolean', 'description': 'If enabled, users can create tokens for use by the Docker CLI. Defaults to True', 'x-example': False}, 'APP_SPECIFIC_TOKEN_EXPIRATION': {'type': ['string', 'null'], 'description': 'The expiration for external app tokens. Defaults to None.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'EXPIRED_APP_SPECIFIC_TOKEN_GC': {'type': ['string', 'null'], 'description': 'Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.', 'pattern': '^[0-9]+(w|m|d|h|s)$'}, 'FEATURE_GARBAGE_COLLECTION': {'type': 'boolean', 'description': 'Whether garbage collection of repositories is enabled. Defaults to True', 'x-example': False}, 'FEATURE_RATE_LIMITS': {'type': 'boolean', 'description': 'Whether to enable rate limits on API and registry endpoints. Defaults to False', 'x-example': True}, 'FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL': {'type': 'boolean', 'description': 'Whether to allow retrieval of aggregated log counts. Defaults to True', 'x-example': True}, 'FEATURE_LOG_EXPORT': {'type': 'boolean', 'description': 'Whether to allow exporting of action logs. Defaults to True', 'x-example': True}, 'FEATURE_USER_LAST_ACCESSED': {'type': 'boolean', 'description': 'Whether to record the last time a user was accessed. 
Defaults to True', 'x-example': True}, 'FEATURE_PERMANENT_SESSIONS': {'type': 'boolean', 'description': 'Whether sessions are permanent. Defaults to True', 'x-example': True}, 'FEATURE_SUPER_USERS': {'type': 'boolean', 'description': 'Whether super users are supported. Defaults to True', 'x-example': True}, 'FEATURE_FIPS': {'type': 'boolean', 'description': 'If set to true, Quay will run using FIPS compliant hash functions. Defaults to False', 'x-example': True}, 'FEATURE_ANONYMOUS_ACCESS': {'type': 'boolean', 'description': ' Whether to allow anonymous users to browse and pull public repositories. Defaults to True', 'x-example': True}, 'FEATURE_USER_CREATION': {'type': 'boolean', 'description': 'Whether users can be created (by non-super users). Defaults to True', 'x-example': True}, 'FEATURE_INVITE_ONLY_USER_CREATION': {'type': 'boolean', 'description': 'Whether users being created must be invited by another user. Defaults to False', 'x-example': False}, 'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': {'type': 'boolean', 'description': 'Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False', 'x-example': False}, 'FEATURE_DIRECT_LOGIN': {'type': 'boolean', 'description': 'Whether users can directly login to the UI. Defaults to True', 'x-example': True}, 'FEATURE_ADVERTISE_V2': {'type': 'boolean', 'description': 'Whether the v2/ endpoint is visible. Defaults to True', 'x-example': True}, 'FEATURE_ACTION_LOG_ROTATION': {'type': 'boolean', 'description': 'Whether or not to rotate old action logs to storage. Defaults to False', 'x-example': False}, 'FEATURE_ACI_CONVERSION': {'type': 'boolean', 'description': 'Whether to enable conversion to ACIs. Defaults to False', 'x-example': False}, 'FEATURE_LIBRARY_SUPPORT': {'type': 'boolean', 'description': 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. 
Defaults to True', 'x-example': True}, 'FEATURE_REQUIRE_TEAM_INVITE': {'type': 'boolean', 'description': 'Whether to require invitations when adding a user to a team. Defaults to True', 'x-example': True}, 'FEATURE_USER_METADATA': {'type': 'boolean', 'description': 'Whether to collect and support user metadata. Defaults to False', 'x-example': False}, 'FEATURE_APP_REGISTRY': {'type': 'boolean', 'description': 'Whether to enable support for App repositories. Defaults to False', 'x-example': False}, 'FEATURE_READONLY_APP_REGISTRY': {'type': 'boolean', 'description': 'Whether to App repositories are read-only. Defaults to False', 'x-example': True}, 'FEATURE_PUBLIC_CATALOG': {'type': 'boolean', 'description': 'If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False', 'x-example': False}, 'FEATURE_READER_BUILD_LOGS': {'type': 'boolean', 'description': 'If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False', 'x-example': False}, 'FEATURE_PARTIAL_USER_AUTOCOMPLETE': {'type': 'boolean', 'description': 'If set to true, autocompletion will apply to partial usernames. Defaults to True', 'x-example': True}, 'FEATURE_USER_LOG_ACCESS': {'type': 'boolean', 'description': 'If set to true, users will have access to audit logs for their namespace. Defaults to False', 'x-example': True}, 'FEATURE_USER_RENAME': {'type': 'boolean', 'description': 'If set to true, users can rename their own namespace. Defaults to False', 'x-example': True}, 'FEATURE_USERNAME_CONFIRMATION': {'type': 'boolean', 'description': 'If set to true, users can confirm their generated usernames. Defaults to True', 'x-example': False}, 'FEATURE_RESTRICTED_V1_PUSH': {'type': 'boolean', 'description': 'If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. 
Defaults to True', 'x-example': False}, 'FEATURE_REPO_MIRROR': {'type': 'boolean', 'description': 'Whether to enable support for repository mirroring. Defaults to False', 'x-example': False}, 'REPO_MIRROR_TLS_VERIFY': {'type': 'boolean', 'description': 'Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True', 'x-example': True}, 'REPO_MIRROR_SERVER_HOSTNAME': {'type': ['string', 'null'], 'description': 'Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset', 'x-example': 'openshift-quay-service'}, 'V1_PUSH_WHITELIST': {'type': 'array', 'description': 'The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.', 'x-example': ['some', 'namespaces']}, 'LOGS_MODEL': {'type': 'string', 'description': 'Logs model for action logs', 'enum': ['database', 'transition_reads_both_writes_es', 'elasticsearch'], 'x-example': 'database'}, 'LOGS_MODEL_CONFIG': {'type': 'object', 'description': 'Logs model config for action logs', 'x-reference': 'https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html', 'properties': {'producer': {'type': 'string', 'description': 'Logs producer if logging to Elasticsearch', 'enum': ['kafka', 'elasticsearch', 'kinesis_stream'], 'x-example': 'kafka'}, 'elasticsearch_config': {'type': 'object', 'description': 'Elasticsearch cluster configuration', 'properties': {'host': {'type': 'string', 'description': 'Elasticsearch cluster endpoint', 'x-example': 'host.elasticsearch.example'}, 'port': {'type': 'number', 'description': 'Elasticsearch cluster endpoint port', 'x-example': 1234}, 'access_key': {'type': 'string', 'description': 'Elasticsearch user (or IAM key for AWS ES)', 'x-example': 'some_string'}, 'secret_key': {'type': 'string', 'description': 'Elasticsearch password (or IAM secret for AWS ES)', 'x-example': 'some_secret_string'}, 'aws_region': {'type': 'string', 'description': 'Amazon web service region', 'x-example': 
'us-east-1'}, 'use_ssl': {'type': 'boolean', 'description': 'Use ssl for Elasticsearch. Defaults to True', 'x-example': True}, 'index_prefix': {'type': 'string', 'description': "Elasticsearch's index prefix", 'x-example': 'logentry_'}, 'index_settings': {'type': 'object', 'description': "Elasticsearch's index settings"}}}, 'kafka_config': {'type': 'object', 'description': 'Kafka cluster configuration', 'properties': {'bootstrap_servers': {'type': 'array', 'description': 'List of Kafka brokers to bootstrap the client from', 'uniqueItems': True, 'items': {'type': 'string'}}, 'topic': {'type': 'string', 'description': 'Kafka topic to publish log entries to', 'x-example': 'logentry'}, 'max_block_seconds': {'type': 'number', 'description': 'Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable', 'x-example': 10}}}, 'kinesis_stream_config': {'type': 'object', 'description': 'AWS Kinesis Stream configuration', 'properties': {'stream_name': {'type': 'string', 'description': 'Kinesis stream to send action logs to', 'x-example': 'logentry-kinesis-stream'}, 'aws_region': {'type': 'string', 'description': 'AWS region', 'x-example': 'us-east-1'}, 'aws_access_key': {'type': 'string', 'description': 'AWS access key', 'x-example': 'some_access_key'}, 'aws_secret_key': {'type': 'string', 'description': 'AWS secret key', 'x-example': 'some_secret_key'}, 'connect_timeout': {'type': 'number', 'description': 'Number of seconds before timeout when attempting to make a connection', 'x-example': 5}, 'read_timeout': {'type': 'number', 'description': 'Number of seconds before timeout when reading from a connection', 'x-example': 5}, 'retries': {'type': 'number', 'description': 'Max number of attempts made on a single request', 'x-example': 5}, 'max_pool_connections': {'type': 'number', 'description': 'The maximum number of connections to keep in a connection pool', 'x-example': 10}}}}}, 'FEATURE_BLACKLISTED_EMAILS': {'type': 'boolean', 
'description': 'If set to true, no new User accounts may be created if their email domain is blacklisted.', 'x-example': False}, 'BLACKLISTED_EMAIL_DOMAINS': {'type': 'array', 'description': 'The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.', 'x-example': ['example.com', 'example.org']}, 'FRESH_LOGIN_TIMEOUT': {'type': 'string', 'description': 'The time after which a fresh login requires users to reenter their password', 'x-example': '5m'}, 'WEBHOOK_HOSTNAME_BLACKLIST': {'type': 'array', 'description': 'The set of hostnames to disallow from webhooks when validating, beyond localhost', 'x-example': ['somexternaldomain.com']}, 'CREATE_PRIVATE_REPO_ON_PUSH': {'type': 'boolean', 'description': 'Whether new repositories created by push are set to private visibility. Defaults to True.', 'x-example': True}, 'CREATE_NAMESPACE_ON_PUSH': {'type': 'boolean', 'description': 'Whether new push to a non-existent organization creates it. Defaults to False.', 'x-example': False}, 'FEATURE_USER_INITIALIZE': {'type': 'boolean', 'description': 'If set to true, the first User account may be created via API /api/v1/user/initialize', 'x-example': False}, 'ALLOWED_OCI_ARTIFACT_TYPES': {'type': 'object', 'description': 'The set of allowed OCI artifact mimetypes and the assiciated layer types', 'x-example': {'application/vnd.cncf.helm.config.v1+json': ['application/tar+gzip'], 'application/vnd.sylabs.sif.config.v1+json': ['application/vnd.sylabs.sif.layer.v1.sif']}}}} |
# example file for submodule imports
def divide_me_by_2(x):
    """Return *x* divided by two (true division, so ints yield floats)."""
    half = x / 2
    return half
| def divide_me_by_2(x):
return x / 2 |
class Solution:
    def connect(self, root):
        """Wire each node's `next` pointer to its right neighbour on the same level.

        Level-order walk using two alternating buffers.
        NOTE(review): termination only inspects the first node of each level,
        which looks like it assumes a perfect binary tree — confirm with callers.
        """
        buffers = [[root], []]
        cur = 0
        while (buffers[0] and buffers[0][0]) or (buffers[1] and buffers[1][0]):
            level = buffers[cur]
            nxt = buffers[1 - cur]
            last = len(level) - 1
            for idx, node in enumerate(level):
                node.next = level[idx + 1] if idx < last else None
                nxt.append(node.left)
                nxt.append(node.right)
            buffers[cur] = []
            cur = 1 - cur
        return root
| class Solution:
def connect(self, root):
nodes = [[root], []]
x = 0
while nodes[0] and nodes[0][0] or (nodes[1] and nodes[1][0]):
for i in range(len(nodes[x])):
nodes[x][i].next = None if i == len(nodes[x]) - 1 else nodes[x][i + 1]
nodes[(1 + x) % 2].append(nodes[x][i].left)
nodes[(1 + x) % 2].append(nodes[x][i].right)
nodes[x] = []
x = (1 + x) % 2
return root |
# Timezone Celery uses for scheduling and timestamps.
CELERY_TIMEZONE = 'Europe/Rome'
# The backend used to store task results
CELERY_RESULT_BACKEND = 'rpc://'
# If set to True, result messages will be persistent. This means the messages will not be lost after a broker restart
CELERY_RESULT_PERSISTENT = True
# Content types the worker accepts; NOTE(review): 'pickle' is unsafe if the broker
# can receive messages from untrusted sources — confirm the deployment is closed.
CELERY_ACCEPT_CONTENT=['json', 'pickle']
CELERY_TASK_SERIALIZER='json'
CELERY_RESULT_SERIALIZER='json'
# Broker settings.
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
# AMQP heartbeat interval in seconds, and how often (per interval) it is checked.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2.0
# Modules imported at worker start so task definitions get registered.
CELERY_IMPORTS = [ 'app_celery.tasks' ]
# Path to the worker state database (e.g. persisted revoked-task ids).
CELERYD_STATE_DB = '/var/celery/db/state'
# Enables error emails.
CELERY_SEND_TASK_ERROR_EMAILS = True
# Name and email addresses of recipients
ADMINS = (
    ('Administrator Name', 'admin@somedoamin.net'),
)
# Email address used as sender (From field).
SERVER_EMAIL = 'no-reply@somedomain.net'
# Mailserver configuration
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
# Send events so the worker can be monitored by tools like celerymon.
CELERY_SEND_EVENTS = True
# If enabled the worker pool can be restarted using the pool_restart remote control command.
CELERYD_POOL_RESTARTS = True
| celery_timezone = 'Europe/Rome'
celery_result_backend = 'rpc://'
celery_result_persistent = True
celery_accept_content = ['json', 'pickle']
celery_task_serializer = 'json'
celery_result_serializer = 'json'
broker_url = 'amqp://guest:guest@localhost:5672//'
broker_heartbeat = 10.0
broker_heartbeat_checkrate = 2.0
celery_imports = ['app_celery.tasks']
celeryd_state_db = '/var/celery/db/state'
celery_send_task_error_emails = True
admins = (('Administrator Name', 'admin@somedoamin.net'),)
server_email = 'no-reply@somedomain.net'
email_host = 'localhost'
email_port = 25
celery_send_events = True
celeryd_pool_restarts = True |
def checkIfMessagerIsBooster(self, user):
    """
    Function would be called by Robot class
    :param self: instance from Robot
    :param user: instance from Discord.User
    :return: True if user is a booster
    """
    return any(role == self.boostedRole for role in user.roles)
| def check_if_messager_is_booster(self, user):
"""
Function would be called by Robot class
:param self: instance from Robot
:param user: instance from Discord.User
:return: True if user is a booster
"""
for role in user.roles:
if role == self.boostedRole:
return True
return False |
class InputBroker:
    """Abstract class responsible for providing raw values when considering scores"""
    def get_input_value(self, consideration, context):
        # Subclasses must override and return the raw value for *consideration*
        # evaluated against *context*.
        raise NotImplementedError()
| class Inputbroker:
"""Abstract class responsible for providing raw values when considering scores"""
def get_input_value(self, consideration, context):
raise not_implemented_error() |
# colorcodingfor rows(...)
def colornumber(color):
    """Map a color grade letter 'd'..'l' to 0..8; anything else maps to 9."""
    for index, letter in enumerate('defghijkl'):
        if color == letter:
            return index
    return 9
return 9 | def colornumber(color):
if color == 'd':
return 0
elif color == 'e':
return 1
elif color == 'f':
return 2
elif color == 'g':
return 3
elif color == 'h':
return 4
elif color == 'i':
return 5
elif color == 'j':
return 6
elif color == 'k':
return 7
elif color == 'l':
return 8
else:
return 9 |
class Solution:
    def isIdealPermutation(self, A):
        """
        :type A: List[int]
        :rtype: bool
        """
        # Ideal iff every global inversion is local: the running maximum of
        # the prefix must never exceed the element two positions ahead.
        running_max = 0
        for left, two_ahead in zip(A, A[2:]):
            running_max = max(running_max, left)
            if running_max > two_ahead:
                return False
        return True
| class Solution:
def is_ideal_permutation(self, A):
"""
:type A: List[int]
:rtype: bool
"""
(size, m) = (len(A), 0)
for i in range(size - 2):
m = max(m, A[i])
if m > A[i + 2]:
return False
return True |
# 2018 U.S. holiday entries; the 'type' codes (NF/NV/NRF/V) presumably encode
# national/regional and fixed/variable status — TODO confirm with the consumer.
[
{
'date': '2018-01-01',
'description': "New Year's Day",
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-01-15',
'description': 'Birthday of Martin Luther King, Jr.',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-02-19',
'description': "Washington's Birthday",
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-04-16',
'description': "Patriots' Day",
'locale': 'en-US',
'notes': '',
'region': 'MA',
'type': 'V'
},
{
'date': '2018-04-16',
'description': "Patriots' Day",
'locale': 'en-US',
'notes': '',
'region': 'ME',
'type': 'V'
},
{
'date': '2018-05-28',
'description': 'Memorial Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-07-04',
'description': 'Independence Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-09-03',
'description': 'Labor Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-10-08',
'description': 'Columbus Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-11-11',
'description': 'Veterans Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2018-11-22',
'description': 'Thanksgiving Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-11-23',
'description': 'Day after Thanksgiving',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NV'
},
{
'date': '2018-12-24',
'description': 'Christmas Eve',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2018-12-25',
'description': 'Christmas Day',
'locale': 'en-US',
'notes': '',
'region': '',
'type': 'NRF'
}
] | [{'date': '2018-01-01', 'description': "New Year's Day", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-01-15', 'description': 'Birthday of Martin Luther King, Jr.', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-02-19', 'description': "Washington's Birthday", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'MA', 'type': 'V'}, {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'ME', 'type': 'V'}, {'date': '2018-05-28', 'description': 'Memorial Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-07-04', 'description': 'Independence Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-09-03', 'description': 'Labor Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-10-08', 'description': 'Columbus Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-11-11', 'description': 'Veterans Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'}, {'date': '2018-11-22', 'description': 'Thanksgiving Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-11-23', 'description': 'Day after Thanksgiving', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'}, {'date': '2018-12-24', 'description': 'Christmas Eve', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'}, {'date': '2018-12-25', 'description': 'Christmas Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'}] |
# A function to get the desired metrics while working with multiple model training procedures
def print_classification_metrics(y_train, train_pred, y_test, test_pred, return_performance=True):
    """Print train/test classification metrics and optionally return them as a dict.

    Relies on sklearn-style accuracy_score / f1_score / precision_score /
    recall_score being in scope at module level.
    """
    dict_performance = {
        'Training Accuracy: ': accuracy_score(y_train, train_pred),
        'Training f1-score: ': f1_score(y_train, train_pred),
        'Accuracy: ': accuracy_score(y_test, test_pred),
        'Precision: ': precision_score(y_test, test_pred),
        'Recall: ': recall_score(y_test, test_pred),
        'f1-score: ': f1_score(y_test, test_pred),
    }
    for label, score in dict_performance.items():
        print("{} : {}".format(label, score))
    if return_performance:
        return dict_performance
| def print_classification_metrics(y_train, train_pred, y_test, test_pred, return_performance=True):
dict_performance = {'Training Accuracy: ': accuracy_score(y_train, train_pred), 'Training f1-score: ': f1_score(y_train, train_pred), 'Accuracy: ': accuracy_score(y_test, test_pred), 'Precision: ': precision_score(y_test, test_pred), 'Recall: ': recall_score(y_test, test_pred), 'f1-score: ': f1_score(y_test, test_pred)}
for (key, value) in dict_performance.items():
print('{} : {}'.format(key, value))
if return_performance:
return dict_performance |
#!/usr/bin/env python
def part_one(values: list[int]) -> int:
    """Count the positions where the next value is strictly larger than the current."""
    return sum(1 for prev, nxt in zip(values, values[1:]) if prev < nxt)
def part_two(values: list[int]) -> int:
    """Count increases between sums of consecutive three-value sliding windows."""
    window_sums = [a + b + c for a, b, c in zip(values, values[1:], values[2:])]
    return sum(1 for prev, nxt in zip(window_sums, window_sums[1:]) if prev < nxt)
if __name__ == '__main__':
    # Puzzle input: one integer per line; print the answers to both parts.
    values_: list[int] = [int(row) for row in open("../../../input.txt").readlines()]
    print(part_one(values=values_))
    print(part_two(values=values_))
| def part_one(values: list[int]) -> int:
count = sum((values[index] < values[index + 1] for index in range(len(values) - 1)))
return count
def part_two(values: list[int]) -> int:
summed_list = list((sum(three) for three in zip(values, values[1:], values[2:])))
count = sum((summed_list[index] < summed_list[index + 1] for index in range(len(summed_list) - 1)))
return count
if __name__ == '__main__':
values_: list[int] = [int(row) for row in open('../../../input.txt').readlines()]
print(part_one(values=values_))
print(part_two(values=values_)) |
"""Top-level package for Client 1C for Time Sheet."""
__author__ = """Nick K Sabinin"""
__email__ = 'sabnk@optictelecom.ru'
__version__ = '0.1.0'
| """Top-level package for Client 1C for Time Sheet."""
__author__ = 'Nick K Sabinin'
__email__ = 'sabnk@optictelecom.ru'
__version__ = '0.1.0' |
# Usage: gunicorn ProductCatalog.wsgi --bind 0.0.0.0:$PORT --config deploy/gunicorn.conf.py
# Max number of pending connections.
backlog = 1024
# Number of workers spawned for request handling.
# NOTE(review): one sync worker handles a single request at a time — scale with
# CPU count if throughput matters.
workers = 1
# Standard type of workers.
worker_class = 'sync'
# Kill worker if it does not notify the master process in this number of seconds.
timeout = 30
# Log file location.
logfile = '/var/log/productcatalog-gunicorn.log'
# The granularity of log output.
loglevel = 'info'
| backlog = 1024
workers = 1
worker_class = 'sync'
timeout = 30
logfile = '/var/log/productcatalog-gunicorn.log'
loglevel = 'info' |
# NOTE: This objects are used directly in the external-notification-data and vulnerability-service
# on the frontend, so be careful with changing their existing keys.
# Maps severity name -> display metadata; 'index' 0 is the most severe, and
# 'score' roughly mirrors the CVSS bands used in get_priority_from_cvssscore below.
PRIORITY_LEVELS = {
"Unknown": {
"title": "Unknown",
"value": "Unknown",
"index": 5,
"level": "info",
"color": "#9B9B9B",
"score": 0,
"description": "Unknown is either a security problem that has not been assigned to a priority"
+ " yet or a priority that our system did not recognize",
"banner_required": False,
},
"Negligible": {
"title": "Negligible",
"value": "Negligible",
"index": 4,
"level": "info",
"color": "#9B9B9B",
"score": 1,
"description": "Negligible is technically a security problem, but is only theoretical "
+ "in nature, requires a very special situation, has almost no install base, "
+ "or does no real damage.",
"banner_required": False,
},
"Low": {
"title": "Low",
"value": "Low",
"index": 3,
"level": "warning",
"color": "#F8CA1C",
"score": 3,
"description": "Low is a security problem, but is hard to exploit due to environment, "
+ "requires a user-assisted attack, a small install base, or does very little"
+ " damage.",
"banner_required": False,
},
"Medium": {
"title": "Medium",
"value": "Medium",
"index": 2,
"level": "warning",
"color": "#FCA657",
"score": 6,
"description": "Medium is a real security problem, and is exploitable for many people. "
+ "Includes network daemon denial of service attacks, cross-site scripting, and "
+ "gaining user privileges.",
"banner_required": False,
},
"High": {
"title": "High",
"value": "High",
"index": 1,
"level": "warning",
"color": "#F77454",
"score": 9,
"description": "High is a real problem, exploitable for many people in a default "
+ "installation. Includes serious remote denial of services, local root "
+ "privilege escalations, or data loss.",
"banner_required": False,
},
"Critical": {
"title": "Critical",
"value": "Critical",
"index": 0,
"level": "error",
"color": "#D64456",
"score": 10,
"description": "Critical is a world-burning problem, exploitable for nearly all people in "
+ "a installation of the package. Includes remote root privilege escalations, "
+ "or massive data loss.",
"banner_required": False,
},
}
def get_priority_for_index(index):
    """Return the priority name whose 'index' equals *index* (int or int-like string).

    Falls back to 'Unknown' for unparsable or unmatched indices.
    """
    try:
        wanted = int(index)
    except ValueError:
        return "Unknown"
    for name in PRIORITY_LEVELS:
        if PRIORITY_LEVELS[name]["index"] == wanted:
            return name
    return "Unknown"
def get_priority_from_cvssscore(score):
    """Map a CVSS base score to a severity value.

    Returns 'Unknown' for 0, out-of-range, or non-numeric input.
    CVSS v3 base scores span 0.0-10.0 inclusive.
    """
    try:
        if 0 < score < 4:
            return PRIORITY_LEVELS["Low"]["value"]
        if 4 <= score < 7:
            return PRIORITY_LEVELS["Medium"]["value"]
        if 7 <= score < 9:
            return PRIORITY_LEVELS["High"]["value"]
        # Bug fix: the upper bound was `< 10`, so a maximum-severity score of
        # exactly 10.0 fell through to 'Unknown'.
        if 9 <= score <= 10:
            return PRIORITY_LEVELS["Critical"]["value"]
    except (TypeError, ValueError):
        # Comparing a non-numeric score raises TypeError, which the original
        # `except ValueError` never caught (it would have propagated).
        return "Unknown"
    return "Unknown"
def fetch_vuln_severity(vuln, enrichments):
    """Prefer the report's normalized severity; otherwise derive one from the CVSS base score."""
    severity = vuln["normalized_severity"]
    if severity and severity != PRIORITY_LEVELS["Unknown"]["value"]:
        return severity
    base_score = enrichments.get(vuln["id"], {}).get("baseScore", None)
    if base_score:
        return get_priority_from_cvssscore(base_score)
    return PRIORITY_LEVELS["Unknown"]["value"]
| priority_levels = {'Unknown': {'title': 'Unknown', 'value': 'Unknown', 'index': 5, 'level': 'info', 'color': '#9B9B9B', 'score': 0, 'description': 'Unknown is either a security problem that has not been assigned to a priority' + ' yet or a priority that our system did not recognize', 'banner_required': False}, 'Negligible': {'title': 'Negligible', 'value': 'Negligible', 'index': 4, 'level': 'info', 'color': '#9B9B9B', 'score': 1, 'description': 'Negligible is technically a security problem, but is only theoretical ' + 'in nature, requires a very special situation, has almost no install base, ' + 'or does no real damage.', 'banner_required': False}, 'Low': {'title': 'Low', 'value': 'Low', 'index': 3, 'level': 'warning', 'color': '#F8CA1C', 'score': 3, 'description': 'Low is a security problem, but is hard to exploit due to environment, ' + 'requires a user-assisted attack, a small install base, or does very little' + ' damage.', 'banner_required': False}, 'Medium': {'title': 'Medium', 'value': 'Medium', 'index': 2, 'level': 'warning', 'color': '#FCA657', 'score': 6, 'description': 'Medium is a real security problem, and is exploitable for many people. ' + 'Includes network daemon denial of service attacks, cross-site scripting, and ' + 'gaining user privileges.', 'banner_required': False}, 'High': {'title': 'High', 'value': 'High', 'index': 1, 'level': 'warning', 'color': '#F77454', 'score': 9, 'description': 'High is a real problem, exploitable for many people in a default ' + 'installation. Includes serious remote denial of services, local root ' + 'privilege escalations, or data loss.', 'banner_required': False}, 'Critical': {'title': 'Critical', 'value': 'Critical', 'index': 0, 'level': 'error', 'color': '#D64456', 'score': 10, 'description': 'Critical is a world-burning problem, exploitable for nearly all people in ' + 'a installation of the package. Includes remote root privilege escalations, ' + 'or massive data loss.', 'banner_required': False}}
def get_priority_for_index(index):
try:
int_index = int(index)
except ValueError:
return 'Unknown'
for priority in PRIORITY_LEVELS:
if PRIORITY_LEVELS[priority]['index'] == int_index:
return priority
return 'Unknown'
def get_priority_from_cvssscore(score):
try:
if 0 < score < 4:
return PRIORITY_LEVELS['Low']['value']
if 4 <= score < 7:
return PRIORITY_LEVELS['Medium']['value']
if 7 <= score < 9:
return PRIORITY_LEVELS['High']['value']
if 9 <= score < 10:
return PRIORITY_LEVELS['Critical']['value']
except ValueError:
return 'Unknown'
return 'Unknown'
def fetch_vuln_severity(vuln, enrichments):
if vuln['normalized_severity'] and vuln['normalized_severity'] != PRIORITY_LEVELS['Unknown']['value']:
return vuln['normalized_severity']
if enrichments.get(vuln['id'], {}).get('baseScore', None):
return get_priority_from_cvssscore(enrichments[vuln['id']]['baseScore'])
return PRIORITY_LEVELS['Unknown']['value'] |
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        # Node payload plus left/right child links (wired up by the caller).
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def sumEvenGrandparent(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        total = [0]
        self.traverse(root, None, None, total)
        return total[0]
    def traverse(self, node, pnode, gpnode, sum_even):
        # Depth-first walk carrying the parent (pnode) and grandparent (gpnode);
        # accumulates node.val into sum_even[0] whenever the grandparent is even.
        if node is None:
            return
        if gpnode is not None and gpnode.val % 2 == 0:
            sum_even[0] += node.val
        for child in (node.left, node.right):
            if child:
                self.traverse(child, node, pnode, sum_even)
def test_sum_even_grand_parent():
    """Builds the sample tree and checks the even-grandparent sum equals 18."""
    a = TreeNode(6)
    b = TreeNode(7)
    c = TreeNode(8)
    a.left = b
    a.right = c
    d = TreeNode(2)
    e = TreeNode(7)
    b.left = d
    b.right = e
    f = TreeNode(1)
    h = TreeNode(3)
    c.left = f
    c.right = h
    i = TreeNode(9)
    j = TreeNode(1)
    k = TreeNode(4)
    m = TreeNode(5)
    d.left = i
    e.left = j
    e.right = k
    h.right = m
    s = Solution()
    assert 18 == s.sumEvenGrandparent(a)
| class Treenode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sum_even_grandparent(self, root):
"""
:type root: TreeNode
:rtype: int
"""
res = [0]
self.traverse(root, None, None, res)
return res[0]
def traverse(self, node, pnode, gpnode, sum_even):
if node:
if gpnode and gpnode.val % 2 == 0:
sum_even[0] += node.val
if node.left:
self.traverse(node.left, node, pnode, sum_even)
if node.right:
self.traverse(node.right, node, pnode, sum_even)
def test_sum_even_grand_parent():
    """Build the sample tree and check the even-grandparent sum equals 18.

    Bug fix: this snippet defines class `Treenode`, class `Solution`, and
    method `sum_even_grandparent`, but the test previously called the
    undefined names `tree_node(...)` / `solution()` and the nonexistent
    method `sumEvenGrandparent` (NameError/AttributeError at runtime).
    """
    a = Treenode(6)
    b = Treenode(7)
    c = Treenode(8)
    a.left = b
    a.right = c
    d = Treenode(2)
    e = Treenode(7)
    b.left = d
    b.right = e
    f = Treenode(1)
    h = Treenode(3)
    c.left = f
    c.right = h
    i = Treenode(9)
    j = Treenode(1)
    k = Treenode(4)
    m = Treenode(5)
    d.left = i
    e.left = j
    e.right = k
    h.right = m
    s = Solution()
    assert 18 == s.sum_even_grandparent(a)
class solution:
    """Counter for integers whose decimal form has an even digit count."""

    def findNumbers(self, nums=[]):
        # An integer has an even number of digits iff len(str(n)) is even.
        return sum(1 for value in nums if len(str(value)) % 2 == 0)
if __name__ == "__main__":
    # Read whitespace-separated integers from stdin and report how many of
    # them have an even number of decimal digits.
    sol = solution()
    _ = [int(n) for n in input().split()]
    print(sol.findNumbers(_))
| class Solution:
def find_numbers(self, nums=[]):
even = 0
for num in nums:
num_string = str(num)
if len(numString) % 2 == 0:
even += 1
return even
if __name__ == '__main__':
    # Bug fix: this half of the file defines `Solution.find_numbers`;
    # the lowercase `solution()` / `findNumbers` names do not exist here.
    sol = Solution()
    _ = [int(n) for n in input().split()]
    print(sol.find_numbers(_))
# Mean of a fixed list of pressure readings.
pressure_arr = [80, 90, 100, 150, 120, 110, 160, 110, 100]
# Accumulate into `total` instead of shadowing the built-in `sum`.
total = 0
for pressure in pressure_arr:
    total = pressure + total
length = len(pressure_arr)
mean = total / length
print("The mean is", mean)
| pressure_arr = [80, 90, 100, 150, 120, 110, 160, 110, 100]
# Accumulate into `total` rather than shadowing the built-in `sum`.
total = 0
for pressure in pressure_arr:
    total = pressure + total
length = len(pressure_arr)
mean = total / length
print('The mean is', mean)
# Read a count (only used to frame the input) and a list of integers, then
# print the first value in sorted order that differs from both neighbours;
# the last element is printed when the loop reaches it.
n = int(input())
families = map(int, input().split())
families = sorted(families)
for i in range(len(families)):
    if(i!=len(families)-1):
        # NOTE(review): for i == 0, families[i - 1] wraps to the LAST
        # element; in a sorted list this only matters when all values are
        # equal, in which case the second comparison fails anyway — confirm.
        if(families[i]!=families[i - 1] and families[i]!=families[i + 1]):
            print(families[i])
            break
    else:
        print(families[i])
| n = int(input())
# Parse and sort the family sizes, then scan for the first value that
# differs from both of its sorted neighbours.
families = map(int, input().split())
families = sorted(families)
for i in range(len(families)):
    if i != len(families) - 1:
        # NOTE(review): at i == 0 the families[i - 1] access wraps to the
        # last element (Python negative indexing) — confirm this is intended.
        if families[i] != families[i - 1] and families[i] != families[i + 1]:
            print(families[i])
            break
    else:
print(families[i]) |
class Solution:
    def expand(self, S: str) -> List[str]:
        """Expand brace groups like "{a,b}c{d,e}" into all words, sorted."""
        return sorted(self.dfs(S, ['']))

    def dfs(self, s, prev):
        """Consume `s` up to and through its first '{...}' group.

        `prev` holds every prefix built so far; the literal run before the
        group is appended to all prefixes, the group's options fan them out,
        and the remainder of the string is processed recursively.
        (Cleanup: the unused local `result` from the original was removed.)
        """
        if not s:
            return prev
        cur = ''
        found = False
        start = 0
        for i in range(len(s)):
            if s[i].isalpha():
                cur += s[i]
                continue
            if s[i] == '{':
                found = True
                start = i
                break
        # Attach the literal prefix to every word built so far.
        added = [sub + cur for sub in prev]
        if not found:
            return added
        end = s.find('}')
        chars = s[start + 1: end].split(',')
        arr = [sub + ch for sub in added for ch in chars]
        return self.dfs(s[end + 1:], arr)
| class Solution:
    def expand(self, S: str) -> List[str]:
        """Expand every "{x,y,...}" option group in S; return all words sorted."""
        return sorted(self.dfs(S, ['']))
    def dfs(self, s, prev):
        """Consume `s` through its first '{...}' group, appending the literal
        prefix to every word in `prev`, then recurse on the remainder."""
        if not s:
            return prev
        n = len(s)
        cur = ''
        found = False
        result = []  # NOTE(review): never used — left in place unchanged.
        for i in range(n):
            if s[i].isalpha():
                cur += s[i]
                continue
            if s[i] == '{':
                found = True
                start = i
                break
        # Attach the literal run `cur` to every word built so far.
        added = []
        for sub in prev:
            added.append(sub + cur)
        if not found:
            return added
        end = s.find('}')
        chars = s[start + 1:end].split(',')
        # Fan each prefix out across the group's comma-separated options.
        arr = []
        for sub in added:
            for ch in chars:
                arr.append(sub + ch)
return self.dfs(s[end + 1:], arr) |
def translate(data, char, replacement):
    """Replace every occurrence of `char` in `data`; print and return it."""
    updated = data.replace(char, replacement)
    print(updated)
    return updated
def includes(data, string):
    """Return True when `string` occurs anywhere inside `data`."""
    # The original if/return-True/return-False ladder is just the
    # membership expression itself.
    return string in data
def start(data, string):
    """Return True when `data` starts with the non-empty prefix `string`.

    Mirrors the original loop's contract (an empty `string` yields False)
    and fixes a robustness bug: a `string` longer than `data` previously
    raised IndexError; it now returns False.
    """
    return bool(string) and data.startswith(string)
def findindex(data, char):
    """Return the index of the LAST occurrence of `char` in `data`.

    Despite the name, the original loop kept overwriting its index, so the
    last match won; `str.rfind` does the same in one call.  For a missing
    `char` this now returns -1 instead of raising UnboundLocalError.
    """
    return data.rfind(char)
def remove(data, start_index, count):
    """Cut `count` characters starting at `start_index` (string args accepted).

    The cut is applied (and the result printed) only when it stops strictly
    before the end of `data`; otherwise `data` is returned untouched.
    """
    begin = int(start_index)
    stop = begin + int(count)
    if stop < len(data):
        data = data[:begin] + data[stop:]
        print(data)
    return data
# Interactive command loop: read a line of text, then apply commands
# ("Lowercase", "Translate a b", "Includes a", "Start a", "FindIndex a",
# "Remove i n") until the sentinel "End" is entered.
# NOTE(review): if the very first command is a single unknown word, `act`
# is used before assignment (NameError) — confirm inputs are trusted.
data = input()
command = input()
while command != "End":
    command = command.split()
    if command[0] == "Lowercase":
        data = data.lower()
        print(data)
        command = input()
        continue
    elif len(command) == 2:
        act = command[0]
        a = command[1]
    elif len(command) == 3:
        act = command[0]
        a = command[1]
        b = command[2]
    if act == "Translate":
        data = translate(data, a, b)
    elif act == "Includes":
        print(includes(data, a))
    elif act == "Start":
        print(start(data, a))
    elif act == "FindIndex":
        print(findindex(data, a))
    elif act == "Remove":
        data = remove(data, a, b)
    command = input()
command = input() | def translate(data, char, replacement):
result = data.replace(char, replacement)
print(result)
return result
def includes(data, string):
    """Report whether `string` occurs anywhere in `data`."""
    found = string in data
    return found
def start(data, string):
    """Character-by-character check that `data` begins with `string`.

    Returns False for an empty `string` (the loop body never runs).
    """
    matched = False
    for position, char in enumerate(string):
        if char != data[position]:
            matched = False
            break
        matched = True
    return matched
def findindex(data, char):
    """Return the index of the last occurrence of `char` in `data`.

    NOTE(review): when `char` never occurs, the index variable is unbound
    and this raises UnboundLocalError — preserved from the original.
    """
    for position in range(len(data)):
        if data[position] == char:
            last_seen = position
    return last_seen
def remove(data, start_index, count):
    """Delete `count` characters at `start_index` (string args accepted).

    The shortened string is printed only when the cut fits strictly inside
    `data`; otherwise the input is returned unchanged.
    """
    first = int(start_index)
    last = first + int(count)
    if last < len(data):
        data = data[:first] + data[last:]
        print(data)
    return data
# Interactive command loop over a line of text; commands are applied until
# the sentinel 'End' arrives on stdin.
# NOTE(review): a first command that is one unknown word leaves `act`
# unassigned (NameError) — confirm inputs are trusted.
data = input()
command = input()
while command != 'End':
    command = command.split()
    if command[0] == 'Lowercase':
        data = data.lower()
        print(data)
        command = input()
        continue
    elif len(command) == 2:
        act = command[0]
        a = command[1]
    elif len(command) == 3:
        act = command[0]
        a = command[1]
        b = command[2]
    if act == 'Translate':
        data = translate(data, a, b)
    elif act == 'Includes':
        print(includes(data, a))
    elif act == 'Start':
        print(start(data, a))
    elif act == 'FindIndex':
        print(findindex(data, a))
    elif act == 'Remove':
        data = remove(data, a, b)
command = input() |
# Advent-of-Code-style XMAS cipher: sliding window of size L.
L = 25
with open('input') as f:
    nums = list(map(int, f.read().split()))
# Part 1: print the first number that is NOT the sum of two distinct values
# among the preceding L numbers (for-else fires when no pair is found).
for i in range(L, len(nums)):
    pre = nums[i - L:i]
    n = nums[i]
    d = {}
    for p in pre:
        # d maps complement -> value; seeing p as a key means some earlier
        # q satisfied q + p == n (p != d[p] rules out using a value twice).
        if p in d and p != d[p]:
            break
        d[n - p] = p
    else:
        print(n)
        break
# Part 2: grow/shrink a contiguous window until it sums to the part-1
# answer `n`; print min+max of that window.
i = 0
j = 2
while j < len(nums):
    cont = nums[i:j]
    s = sum(cont)
    if s > n:
        i += 1
    elif s < n or j - i < 2:
        j += 1
    else:
        print(min(cont) + max(cont))
        break
| l = 25
# XMAS cipher, duplicate copy.  Bug fix: this version renamed the window
# constant to lowercase `l` (on a markup-damaged line above) while the
# loops still read the undefined `L` (NameError); use a local constant.
window = 25
with open('input') as f:
    nums = list(map(int, f.read().split()))
# Part 1: first value that is not the sum of two distinct numbers among
# the preceding `window` values.
for i in range(window, len(nums)):
    pre = nums[i - window:i]
    n = nums[i]
    d = {}
    for p in pre:
        if p in d and p != d[p]:
            break
        d[n - p] = p
    else:
        print(n)
        break
# Part 2: contiguous run summing to the part-1 answer; print min+max.
i = 0
j = 2
while j < len(nums):
    cont = nums[i:j]
    s = sum(cont)
    if s > n:
        i += 1
    elif s < n or j - i < 2:
        j += 1
    else:
        print(min(cont) + max(cont))
break |
class UnexpectedMode(ValueError):
    """Raised when a mode string is neither 'image' nor 'mesh'."""

    def __init__(self, mode: str) -> None:
        message = f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'"
        super().__init__(message)
| class Unexpectedmode(ValueError):
def __init__(self, mode: str) -> None:
super().__init__(f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'") |
# -*- coding: utf-8 -*-
"""
Jaccard Index Implementation
@author: AniruddhaMaheshDave
"""
def jaccard_index(str1, str2, n_gram = 2):
    """
    Compute the Jaccard index between `str1` and `str2` over character
    n-grams (default: bigrams): |A & B| / |A | B| for the two n-gram sets.
    Identical strings score 1, an empty input scores 0, and n-grams
    containing a space are discarded.
    """
    if str1 == str2:
        return 1
    len1, len2 = len(str1), len(str2)
    if (len1 == 0) or (len2 == 0):
        return 0
    first_set = set()
    second_set = set()
    # For n_gram >= 2, range(len1 - 1) together with the length filter below
    # keeps exactly the full, space-free n-grams (short tail slices fail the
    # len(...) == n_gram test).
    for i in range(len1 - 1):
        if (' ' not in str1[i:i+n_gram] and len(str1[i:i+n_gram]) == n_gram):
            first_set.add(str1[i:i+n_gram])
    for i in range(len2 - 1):
        if (' ' not in str2[i:i+n_gram] and len(str2[i:i+n_gram]) == n_gram):
            second_set.add(str2[i:i+n_gram])
    if first_set and second_set:
        intersection_cardinality = len(first_set.intersection(second_set))
        union_cardinality = len(first_set.union(second_set))
        return intersection_cardinality/float(union_cardinality)
    else:
raise Exception("No n-grams found. Choose a lower value of n_gram") | """
Jaccard Index Implementation
@author: AniruddhaMaheshDave
"""
def jaccard_index(str1, str2, n_gram=2):
    """Compute the Jaccard index of the two strings' character n-gram sets.

    Identical strings score 1 and an empty input scores 0; otherwise the
    score is |A & B| / |A | B| over the space-free n-grams of each string.

    Raises:
        ValueError: when either string yields no valid n-gram.
    """
    if str1 == str2:
        return 1
    (len1, len2) = (len(str1), len(str2))
    if len1 == 0 or len2 == 0:
        return 0
    first_set = set()
    second_set = set()
    for i in range(len1 - 1):
        if ' ' not in str1[i:i + n_gram] and len(str1[i:i + n_gram]) == n_gram:
            first_set.add(str1[i:i + n_gram])
    for i in range(len2 - 1):
        if ' ' not in str2[i:i + n_gram] and len(str2[i:i + n_gram]) == n_gram:
            second_set.add(str2[i:i + n_gram])
    if first_set and second_set:
        intersection_cardinality = len(first_set.intersection(second_set))
        union_cardinality = len(first_set.union(second_set))
        return intersection_cardinality / float(union_cardinality)
    else:
        # Bug fix: `exception` was an undefined name (NameError at raise
        # time); a missing-n-gram situation is a value problem.
        raise ValueError('No n-grams found. Choose a lower value of n_gram')
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'../../../../../../common_settings.gypi', # Common settings
],
'targets': [
{
'target_name': 'iLBC',
'type': '<(library)',
'dependencies': [
'../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
],
'include_dirs': [
'../interface',
],
'direct_dependent_settings': {
'include_dirs': [
'../interface',
],
},
'sources': [
'../interface/ilbc.h',
'abs_quant.c',
'abs_quant_loop.c',
'augmented_cb_corr.c',
'bw_expand.c',
'cb_construct.c',
'cb_mem_energy.c',
'cb_mem_energy_augmentation.c',
'cb_mem_energy_calc.c',
'cb_search.c',
'cb_search_core.c',
'cb_update_best_index.c',
'chebyshev.c',
'comp_corr.c',
'constants.c',
'create_augmented_vec.c',
'decode.c',
'decode_residual.c',
'decoder_interpolate_lsf.c',
'do_plc.c',
'encode.c',
'energy_inverse.c',
'enh_upsample.c',
'enhancer.c',
'enhancer_interface.c',
'filtered_cb_vecs.c',
'frame_classify.c',
'gain_dequant.c',
'gain_quant.c',
'get_cd_vec.c',
'get_lsp_poly.c',
'get_sync_seq.c',
'hp_input.c',
'hp_output.c',
'ilbc.c',
'index_conv_dec.c',
'index_conv_enc.c',
'init_decode.c',
'init_encode.c',
'interpolate.c',
'interpolate_samples.c',
'lpc_encode.c',
'lsf_check.c',
'lsf_interpolate_to_poly_dec.c',
'lsf_interpolate_to_poly_enc.c',
'lsf_to_lsp.c',
'lsf_to_poly.c',
'lsp_to_lsf.c',
'my_corr.c',
'nearest_neighbor.c',
'pack_bits.c',
'poly_to_lsf.c',
'poly_to_lsp.c',
'refiner.c',
'simple_interpolate_lsf.c',
'simple_lpc_analysis.c',
'simple_lsf_dequant.c',
'simple_lsf_quant.c',
'smooth.c',
'smooth_out_data.c',
'sort_sq.c',
'split_vq.c',
'state_construct.c',
'state_search.c',
'swap_bytes.c',
'unpack_bits.c',
'vq3.c',
'vq4.c',
'window32_w32.c',
'xcorr_coef.c',
'abs_quant.h',
'abs_quant_loop.h',
'augmented_cb_corr.h',
'bw_expand.h',
'cb_construct.h',
'cb_mem_energy.h',
'cb_mem_energy_augmentation.h',
'cb_mem_energy_calc.h',
'cb_search.h',
'cb_search_core.h',
'cb_update_best_index.h',
'chebyshev.h',
'comp_corr.h',
'constants.h',
'create_augmented_vec.h',
'decode.h',
'decode_residual.h',
'decoder_interpolate_lsf.h',
'do_plc.h',
'encode.h',
'energy_inverse.h',
'enh_upsample.h',
'enhancer.h',
'enhancer_interface.h',
'filtered_cb_vecs.h',
'frame_classify.h',
'gain_dequant.h',
'gain_quant.h',
'get_cd_vec.h',
'get_lsp_poly.h',
'get_sync_seq.h',
'hp_input.h',
'hp_output.h',
'defines.h',
'index_conv_dec.h',
'index_conv_enc.h',
'init_decode.h',
'init_encode.h',
'interpolate.h',
'interpolate_samples.h',
'lpc_encode.h',
'lsf_check.h',
'lsf_interpolate_to_poly_dec.h',
'lsf_interpolate_to_poly_enc.h',
'lsf_to_lsp.h',
'lsf_to_poly.h',
'lsp_to_lsf.h',
'my_corr.h',
'nearest_neighbor.h',
'pack_bits.h',
'poly_to_lsf.h',
'poly_to_lsp.h',
'refiner.h',
'simple_interpolate_lsf.h',
'simple_lpc_analysis.h',
'simple_lsf_dequant.h',
'simple_lsf_quant.h',
'smooth.h',
'smooth_out_data.h',
'sort_sq.h',
'split_vq.h',
'state_construct.h',
'state_search.h',
'swap_bytes.h',
'unpack_bits.h',
'vq3.h',
'vq4.h',
'window32_w32.h',
'xcorr_coef.h',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| {'includes': ['../../../../../../common_settings.gypi'], 'targets': [{'target_name': 'iLBC', 'type': '<(library)', 'dependencies': ['../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl'], 'include_dirs': ['../interface'], 'direct_dependent_settings': {'include_dirs': ['../interface']}, 'sources': ['../interface/ilbc.h', 'abs_quant.c', 'abs_quant_loop.c', 'augmented_cb_corr.c', 'bw_expand.c', 'cb_construct.c', 'cb_mem_energy.c', 'cb_mem_energy_augmentation.c', 'cb_mem_energy_calc.c', 'cb_search.c', 'cb_search_core.c', 'cb_update_best_index.c', 'chebyshev.c', 'comp_corr.c', 'constants.c', 'create_augmented_vec.c', 'decode.c', 'decode_residual.c', 'decoder_interpolate_lsf.c', 'do_plc.c', 'encode.c', 'energy_inverse.c', 'enh_upsample.c', 'enhancer.c', 'enhancer_interface.c', 'filtered_cb_vecs.c', 'frame_classify.c', 'gain_dequant.c', 'gain_quant.c', 'get_cd_vec.c', 'get_lsp_poly.c', 'get_sync_seq.c', 'hp_input.c', 'hp_output.c', 'ilbc.c', 'index_conv_dec.c', 'index_conv_enc.c', 'init_decode.c', 'init_encode.c', 'interpolate.c', 'interpolate_samples.c', 'lpc_encode.c', 'lsf_check.c', 'lsf_interpolate_to_poly_dec.c', 'lsf_interpolate_to_poly_enc.c', 'lsf_to_lsp.c', 'lsf_to_poly.c', 'lsp_to_lsf.c', 'my_corr.c', 'nearest_neighbor.c', 'pack_bits.c', 'poly_to_lsf.c', 'poly_to_lsp.c', 'refiner.c', 'simple_interpolate_lsf.c', 'simple_lpc_analysis.c', 'simple_lsf_dequant.c', 'simple_lsf_quant.c', 'smooth.c', 'smooth_out_data.c', 'sort_sq.c', 'split_vq.c', 'state_construct.c', 'state_search.c', 'swap_bytes.c', 'unpack_bits.c', 'vq3.c', 'vq4.c', 'window32_w32.c', 'xcorr_coef.c', 'abs_quant.h', 'abs_quant_loop.h', 'augmented_cb_corr.h', 'bw_expand.h', 'cb_construct.h', 'cb_mem_energy.h', 'cb_mem_energy_augmentation.h', 'cb_mem_energy_calc.h', 'cb_search.h', 'cb_search_core.h', 'cb_update_best_index.h', 'chebyshev.h', 'comp_corr.h', 'constants.h', 'create_augmented_vec.h', 'decode.h', 'decode_residual.h', 'decoder_interpolate_lsf.h', 'do_plc.h', 
'encode.h', 'energy_inverse.h', 'enh_upsample.h', 'enhancer.h', 'enhancer_interface.h', 'filtered_cb_vecs.h', 'frame_classify.h', 'gain_dequant.h', 'gain_quant.h', 'get_cd_vec.h', 'get_lsp_poly.h', 'get_sync_seq.h', 'hp_input.h', 'hp_output.h', 'defines.h', 'index_conv_dec.h', 'index_conv_enc.h', 'init_decode.h', 'init_encode.h', 'interpolate.h', 'interpolate_samples.h', 'lpc_encode.h', 'lsf_check.h', 'lsf_interpolate_to_poly_dec.h', 'lsf_interpolate_to_poly_enc.h', 'lsf_to_lsp.h', 'lsf_to_poly.h', 'lsp_to_lsf.h', 'my_corr.h', 'nearest_neighbor.h', 'pack_bits.h', 'poly_to_lsf.h', 'poly_to_lsp.h', 'refiner.h', 'simple_interpolate_lsf.h', 'simple_lpc_analysis.h', 'simple_lsf_dequant.h', 'simple_lsf_quant.h', 'smooth.h', 'smooth_out_data.h', 'sort_sq.h', 'split_vq.h', 'state_construct.h', 'state_search.h', 'swap_bytes.h', 'unpack_bits.h', 'vq3.h', 'vq4.h', 'window32_w32.h', 'xcorr_coef.h']}]} |
# generated from catkin/cmake/template/pkg.context.pc.in
# Values below were substituted by catkin at configure time for the
# `dwa_local_planner` package; do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include".split(';') if "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "base_local_planner;dynamic_reconfigure;nav_msgs;pluginlib;sensor_msgs;roscpp;tf2;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldwa_local_planner".split(';') if "-ldwa_local_planner" != "" else []
PROJECT_NAME = "dwa_local_planner"
PROJECT_SPACE_DIR = "/home/lzh/racecar_ws/devel"
PROJECT_VERSION = "1.16.7"
| catkin_package_prefix = ''
project_pkg_config_include_dirs = '/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include'.split(';') if '/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include' != '' else []
project_catkin_depends = 'base_local_planner;dynamic_reconfigure;nav_msgs;pluginlib;sensor_msgs;roscpp;tf2;tf2_ros'.replace(';', ' ')
pkg_config_libraries_with_prefix = '-ldwa_local_planner'.split(';') if '-ldwa_local_planner' != '' else []
project_name = 'dwa_local_planner'
project_space_dir = '/home/lzh/racecar_ws/devel'
project_version = '1.16.7' |
# Read a name from stdin and check (case-insensitively) whether it
# contains "silva".
nome = input("Digite seu nome ").strip().lower()
confirmacao = 'silva' in nome
print(f"Seu nome tem silva {confirmacao}") | nome = input('Digite seu nome ').strip().lower()
confirmacao = 'silva' in nome
print(f'Seu nome tem silva {confirmacao}') |
# *****************************
# Environment specific settings
# *****************************
# DO NOT use "DEBUG = True" in production environments
# DEBUG enables interactive tracebacks and auto-reload; never ship enabled.
DEBUG = True
# DO NOT use Unsecure Secrets in production environments
# Generate a safe one with:
# python -c "import os; print repr(os.urandom(24));"
SECRET_KEY = (
    'This is an UNSECURE Secret. CHANGE THIS for production environments.'
)
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = 'sqlite:///../app.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False  # Avoids a SQLAlchemy Warning
| debug = True
# NOTE(review): frameworks such as Flask load only UPPERCASE attributes via
# config.from_object(); these lowercased names would be silently ignored if
# this module is still consumed as a Flask config — confirm before relying
# on it.
secret_key = 'This is an UNSECURE Secret. CHANGE THIS for production environments.'
sqlalchemy_database_uri = 'sqlite:///../app.sqlite'
sqlalchemy_track_modifications = False |
"""
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example 1:
Input: "A"
Output: 1
Example 2:
Input: "AB"
Output: 28
Example 3:
Input: "ZY"
Output: 701
"""
class Solution:
    def titleToNumber(self, s: str) -> int:
        """Convert an Excel column title (e.g. 'AB') to its 1-based number."""
        # Base-26 accumulation with digits A=1 .. Z=26.
        total = 0
        for letter in s:
            total = total * 26 + (ord(letter) - ord('A') + 1)
        return total
| """
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example 1:
Input: "A"
Output: 1
Example 2:
Input: "AB"
Output: 28
Example 3:
Input: "ZY"
Output: 701
"""
class Solution:
    def title_to_number(self, s: str) -> int:
        """Convert an Excel column title (e.g. 'ZY' -> 701) to its number."""
        # Treat the title as a base-26 numeral with digits A=1 .. Z=26.
        value = 0
        for ch in s:
            digit = ord(ch) - ord('A') + 1
            value = 26 * value + digit
        return value
# coding: utf-8
# # Functions (1) - Creating Functions
# In this lesson we're going to learn about functions in Python. Functions are an important tool when programming and their use can be very complex. It's not the aim of this course to teach you how to implement functional programming, instead, this lesson will give you a grounding in how functions work and an insight into how we can use them to help us create charts with Plotly.
#
# ## What is a function?
#
# A function is a block of code which is used to perform a single action. A function should be reusable, and it should behave predictably. We have already used several built-in functions, such as <code>print()</code> and <code>len()</code>, but Python also allows you to create user-defined functions.
#
# ## How to create a function
#
# The syntax of creating a function is relatively straightforward. We first need to tell Python that we're going to define a function using the <code>def</code> keyword; we must then give the function a name followed by some parentheses (<code> () </code>) and a colon. Function names have the same restrictions as variable names (can't start with a number, can only contain letters, numbers and underscores). After the function name has been defined, any code within the function is indented by four spaces (or a tab):
# ````python
# def <function name>():
# <code to run>
# ````
#
# In the cell below, I'm defining a function which prints the string <code>"This is a function"</code> every time it is called:
# In[10]:
def testFunction():
    """Print a fixed demonstration message."""
    message = "This is a function"
    print(message)
# When we have defined a function, we can call the function as we would call any built-in function that we have already used, remembering to include the parentheses:
# In[11]:
testFunction()
# ## Using arguments in a function
#
# When we use the <code>len()</code> function, we have to tell that function which object we want the length of. We are passing that object as an argument to the function:
# In[12]:
len("abcdefg")
# We can do the same with user-defined functions. To do so, we create the function as normal, but inside the parentheses we can put argument names. We can put as many as we like, but each must be separated by a comma:
# ````python
# def <function name>(<arg1>, <arg2>, . . . <argN>):
# <code to run>
# ````
#
# We can then reference these arguments inside the function. In the cell below, I've written a function which prints out two items. Notice that I've converted each item to a string using the <code>str()</code> function - this ensures that the function behaves predictably - without converting an integer to a string, the code wouldn't run.
# In[13]:
def testFunction2(item1, item2):
print("The first item is: " + str(item1) + ", the second item is: " + str(item2))
# We can then use this function an pass arguments to it:
# In[14]:
testFunction2('abc', 20)
# The function will create a different output if we pass different arguments to it. This is because the arguments which are passed to a function only endure for the duration of that function.
# In[15]:
testFunction2('howdy', 'partner')
# ## Returning objects from a function
#
# Functions are useful when we use them to create or modify an object. Variables which are created inside a function are not available to the rest of the code, unless we return them (or specifically declare them to be <a href="http://stackoverflow.com/questions/423379/using-global-variables-in-a-function-other-than-the-one-that-created-them">global variables</a>)
#
# We can return an object created inside a function by using the return keyword; we must assign the output of a function to an object and we cannot write any more code after the return statment.
#
# In the cell below, I create a function which takes returns a list of alternating valus. This function takes three arguments, two of which are the values to alternate, whilst the third is the number of times they must be repeated:
# In[16]:
def alternateList(item1, item2, repeats):
    """Return [item1, item2] repeated `repeats` times."""
    pair = [item1, item2]
    return pair * repeats
# Because the function returns a value we must assign the output that is return to a variable:
# In[17]:
repeated1 = alternateList(5, 50, 3)
# There are two variables created inside this function; <code>alternate</code> and <code>altRepeat</code>. These variables exist only within the function and we cannot access them in open code:
# In[18]:
print(alternate)
# In[19]:
print(altRepeat)
# But because we returned the value of the variable <code>altRepeat</code>, creating a new variable with that value, we can now see what the function <code>alternateList()</code> has created:
# In[20]:
repeated1
# We can return two or more variables from a function by separating each variable with a comma. We must assign each to an object:
# In[21]:
def alternateList(item1, item2, repeats):
    """Return the base pair and that pair repeated `repeats` times."""
    base = [item1, item2]
    repeated = base * repeats
    return base, repeated
pair, rpt = alternateList(77, 99, 5)
print(pair)
# In[22]:
print(rpt)
# ### What have we learnt this lesson?
# In this lesson we've learnt how to define a function using the <code>def</code> keyword, and how to pass arguments to the function. We've seen that these arguments only hold their value within the function, and that we can use a return statement to return one or more values from within the function.
#
# In the next lesson we'll look at how we can use functions to help us make our charts.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
| def test_function():
print('This is a function')
test_function()
len('abcdefg')
def test_function2(item1, item2):
print('The first item is: ' + str(item1) + ', the second item is: ' + str(item2))
test_function2('abc', 20)
test_function2('howdy', 'partner')
def alternate_list(item1, item2, repeats):
    """Return [item1, item2] repeated `repeats` times.

    Bug fix: the function computed `alt_repeat` but returned the undefined
    camel-case name `altRepeat` (NameError).
    """
    alternate = [item1, item2]
    alt_repeat = alternate * repeats
    return alt_repeat
repeated1 = alternate_list(5, 50, 3)
print(alternate)
print(altRepeat)
repeated1
def alternate_list(item1, item2, repeats):
    """Return the base pair and its `repeats`-fold repetition.

    Bug fix: the returned tuple referenced the undefined camel-case name
    `altRepeat` instead of the `alt_repeat` computed above (NameError).
    """
    alternate = [item1, item2]
    alt_repeat = alternate * repeats
    return (alternate, alt_repeat)
(pair, rpt) = alternate_list(77, 99, 5)
print(pair)
print(rpt) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.