content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
def outer (): def inner(): print(x ) x = 12 inner() outer()
def outer(): def inner(): print(x) x = 12 inner() outer()
print ("--------------------------------------------------") cad1 = "separar" print (cad1) nuevo_string1 = ",".join(cad1) print (nuevo_string1) print ("--------------------------------------------------") cad2 = "mi archivo de texto.txt" print (cad2) nuevo_string2 = cad2.split() nuevo_string2 = "_".join(nuevo_string2) print (nuevo_string2) print ("--------------------------------------------------") cad3 = "su clave es: 1540" print (cad3) nuevo_string3 = cad3.replace(cad3[13:], "XXXX") print (nuevo_string3) print ("--------------------------------------------------") cad4 = "2552552550" print (cad4) nuevo_string4 = cad4.replace("55", "55.") print (nuevo_string4) nuevo_string4 = nuevo_string4.split(".") print (nuevo_string4) nuevo_string4 = ".".join(nuevo_string4) print (nuevo_string4) print ("--------------------------------------------------")
print('--------------------------------------------------') cad1 = 'separar' print(cad1) nuevo_string1 = ','.join(cad1) print(nuevo_string1) print('--------------------------------------------------') cad2 = 'mi archivo de texto.txt' print(cad2) nuevo_string2 = cad2.split() nuevo_string2 = '_'.join(nuevo_string2) print(nuevo_string2) print('--------------------------------------------------') cad3 = 'su clave es: 1540' print(cad3) nuevo_string3 = cad3.replace(cad3[13:], 'XXXX') print(nuevo_string3) print('--------------------------------------------------') cad4 = '2552552550' print(cad4) nuevo_string4 = cad4.replace('55', '55.') print(nuevo_string4) nuevo_string4 = nuevo_string4.split('.') print(nuevo_string4) nuevo_string4 = '.'.join(nuevo_string4) print(nuevo_string4) print('--------------------------------------------------')
def is_even(n): if n % 2 == 0: return True def is_positive(n): if n >= 0: return True def get_result(collection): result = [str(x) for x in collection] return ", ".join(result) numbers = [int(x) for x in input().split(", ")] positive_nums = [] negative_nums = [] even_nums = [] odd_nums = [] for n in numbers: if is_positive(n): positive_nums.append(n) else: negative_nums.append(n) if is_even(n): even_nums.append(n) else: odd_nums.append(n) print(f"Positive: {get_result(positive_nums)}") print(f"Negative: {get_result(negative_nums)}") print(f"Even: {get_result(even_nums)}") print(f"Odd: {get_result(odd_nums)}")
def is_even(n): if n % 2 == 0: return True def is_positive(n): if n >= 0: return True def get_result(collection): result = [str(x) for x in collection] return ', '.join(result) numbers = [int(x) for x in input().split(', ')] positive_nums = [] negative_nums = [] even_nums = [] odd_nums = [] for n in numbers: if is_positive(n): positive_nums.append(n) else: negative_nums.append(n) if is_even(n): even_nums.append(n) else: odd_nums.append(n) print(f'Positive: {get_result(positive_nums)}') print(f'Negative: {get_result(negative_nums)}') print(f'Even: {get_result(even_nums)}') print(f'Odd: {get_result(odd_nums)}')
def multi_print(number = 3, word = "Hallo"): for i in range(0, number): print(str(i) + " " + word) multi_print(1, "Hallo") print("--") multi_print() print("--") multi_print(2) print("--") multi_print(word = "Welt") print("--") multi_print(word = "Welt", number = 5) print("--")
def multi_print(number=3, word='Hallo'): for i in range(0, number): print(str(i) + ' ' + word) multi_print(1, 'Hallo') print('--') multi_print() print('--') multi_print(2) print('--') multi_print(word='Welt') print('--') multi_print(word='Welt', number=5) print('--')
""" The structure of a blockchain, Which is accessed like JSON or a dict. """ block = { "index": 1, "timestamp": 1506057125.900785, "transactions": [ { "sender": "8527147fe1f5426f9dd545de4b27ee00", "recipient": "a77f5cdfa2934df3954a5c7c7da5df1f", "amount": 5, } ], "proof": 324984774000, "previous_hash": "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", }
""" The structure of a blockchain, Which is accessed like JSON or a dict. """ block = {'index': 1, 'timestamp': 1506057125.900785, 'transactions': [{'sender': '8527147fe1f5426f9dd545de4b27ee00', 'recipient': 'a77f5cdfa2934df3954a5c7c7da5df1f', 'amount': 5}], 'proof': 324984774000, 'previous_hash': '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'}
# Copyright (c) 2021 Graphcore Ltd. All rights reserved. _blank_symbol_ctc = '_' _pad_symbol = '#' _characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ\' ' # Export all symbols: symbols = [_blank_symbol_ctc] + list(_characters) + [_pad_symbol] # Mappings from symbol to numeric ID and vice versa # (remember - ID zero is reserved for blank symbol of CTC loss) _symbol_to_id = {s: i for i, s in enumerate(symbols)} _id_to_symbol = {i: s for i, s in enumerate(symbols)} def text_to_sequence(text): """ converts text to sequence of numeric IDs """ # putting space for punctuations space_id = _symbol_to_id[' '] return [_symbol_to_id.get(s, space_id) if s not in [_blank_symbol_ctc, _pad_symbol] else space_id for s in text] def sequence_to_text(sequence, seq_length): """ converts numeric sequence to text """ pad_id = _symbol_to_id[_pad_symbol] return "".join([_id_to_symbol[id] for id in sequence[0:seq_length] if id != pad_id]) def pad_text_sequence(text_sequence, max_text_sequence_length): """ pad numeric text sequence if required """ pad = max_text_sequence_length - len(text_sequence) if pad <= 0: return text_sequence[0:max_text_sequence_length] return text_sequence + [_symbol_to_id[_pad_symbol]] * pad
_blank_symbol_ctc = '_' _pad_symbol = '#' _characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ' " symbols = [_blank_symbol_ctc] + list(_characters) + [_pad_symbol] _symbol_to_id = {s: i for (i, s) in enumerate(symbols)} _id_to_symbol = {i: s for (i, s) in enumerate(symbols)} def text_to_sequence(text): """ converts text to sequence of numeric IDs """ space_id = _symbol_to_id[' '] return [_symbol_to_id.get(s, space_id) if s not in [_blank_symbol_ctc, _pad_symbol] else space_id for s in text] def sequence_to_text(sequence, seq_length): """ converts numeric sequence to text """ pad_id = _symbol_to_id[_pad_symbol] return ''.join([_id_to_symbol[id] for id in sequence[0:seq_length] if id != pad_id]) def pad_text_sequence(text_sequence, max_text_sequence_length): """ pad numeric text sequence if required """ pad = max_text_sequence_length - len(text_sequence) if pad <= 0: return text_sequence[0:max_text_sequence_length] return text_sequence + [_symbol_to_id[_pad_symbol]] * pad
PEER_DID_NUMALGO_0 = "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" DID_DOC_NUMALGO_O_BASE58 = """ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "authentication": [ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2018", "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7" } ] } """ DID_DOC_NUMALGO_O_MULTIBASE = """ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "authentication": [ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2020", "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" } ] } """ DID_DOC_NUMALGO_O_JWK = """ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "authentication": [ { "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "JsonWebKey2020", "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "publicKeyJwk": { "kty": "OKP", "crv": "Ed25519", "x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA" } } ] } """ PEER_DID_NUMALGO_2 = ( "did:peer:2" + ".Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc" + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" + ".Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg" + ".SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0" ) DID_DOC_NUMALGO_2_BASE58 = """ { "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "authentication": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2018", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7" }, { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg", "type": "Ed25519VerificationKey2018", "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyBase58": "3M5RCDjPTWPkKSN3sxUmmMqHbmRPegYP1tjcKyrDbt9J" } ], "keyAgreement": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc", "type": "X25519KeyAgreementKey2019", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyBase58": "JhNWeSVLMYccCk7iopQW4guaSJTojqpMEELgSLhKwRr" } ], "service": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0", "type": "DIDCommMessaging", "serviceEndpoint": "https://example.com/endpoint", "routingKeys": [ "did:example:somemediator#somekey" ], "accept": [ "didcomm/v2", "didcomm/aip2;env=rfc587" ] } ] } """ DID_DOC_NUMALGO_2_MULTIBASE = """ { "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "authentication": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" }, { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg", "type": "Ed25519VerificationKey2020", "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg" } ], "keyAgreement": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc", "type": "X25519KeyAgreementKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc" } ], "service": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0", "type": "DIDCommMessaging", "serviceEndpoint": "https://example.com/endpoint", "routingKeys": [ "did:example:somemediator#somekey" ], "accept": [ "didcomm/v2", "didcomm/aip2;env=rfc587" ] } ] } """ DID_DOC_NUMALGO_2_JWK = """ { "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "authentication": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "JsonWebKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyJwk": { "kty": "OKP", "crv": "Ed25519", "x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA" } }, { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg", "type": "JsonWebKey2020", "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyJwk": { "kty": "OKP", "crv": "Ed25519", "x": "Itv8B__b1-Jos3LCpUe8EdTFGTCa_Dza6_3848P3R70" } } ], "keyAgreement": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc", "type": "JsonWebKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0", "publicKeyJwk": { "kty": "OKP", "crv": "X25519", "x": "BIiFcQEn3dfvB2pjlhOQQour6jXy9d5s2FKEJNTOJik" } } ], "service": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0", "type": "DIDCommMessaging", "serviceEndpoint": "https://example.com/endpoint", "routingKeys": [ "did:example:somemediator#somekey" ], "accept": [ "didcomm/v2", "didcomm/aip2;env=rfc587" ] } ] } """ PEER_DID_NUMALGO_2_2_SERVICES = ( "did:peer:2" + 
".Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud" + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" + ".SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0" ) DID_DOC_NUMALGO_2_MULTIBASE_2_SERVICES = """ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0", "authentication": [ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2020", "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0", "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" } ], "keyAgreement": [ { "id": 
"did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud", "type": "X25519KeyAgreementKey2020", "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0", "publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud" } ], "service": [ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#didcommmessaging-0", "type": "DIDCommMessaging", "serviceEndpoint": "https://example.com/endpoint", "routingKeys": [ "did:example:somemediator#somekey" ] }, { "id": 
"did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#example-1", "type": "example", "serviceEndpoint": "https://example.com/endpoint2", "routingKeys": [ "did:example:somemediator#somekey2" ], "accept": ["didcomm/v2", "didcomm/aip2;env=rfc587"] } ] } """ PEER_DID_NUMALGO_2_NO_SERVICES = ( "did:peer:2" + ".Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud" + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" ) DID_DOC_NUMALGO_2_MULTIBASE_NO_SERVICES = """ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "authentication": [ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2020", "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" } ], "keyAgreement": [ { "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud", "type": "X25519KeyAgreementKey2020", "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud" } ] } """ PEER_DID_NUMALGO_2_MINIMAL_SERVICES = 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9" DID_DOC_NUMALGO_2_MULTIBASE_MINIMAL_SERVICES = """ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9", "authentication": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V", "type": "Ed25519VerificationKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9", "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V" }, { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg", "type": "Ed25519VerificationKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9", "publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg" } ], "keyAgreement": [ { "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc", "type": "X25519KeyAgreementKey2020", "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9", "publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc" } ], "service": [ { "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#didcommmessaging-0", "serviceEndpoint": "https://example.com/endpoint", "type": "DIDCommMessaging" } ] } """
peer_did_numalgo_0 = 'did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V' did_doc_numalgo_o_base58 = '\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "authentication": [\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2018",\n "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7"\n }\n ]\n }\n ' did_doc_numalgo_o_multibase = '\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "authentication": [\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"\n }\n ]\n }\n ' did_doc_numalgo_o_jwk = '\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "authentication": [\n {\n "id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "JsonWebKey2020",\n "controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "publicKeyJwk": {\n "kty": "OKP",\n "crv": "Ed25519",\n "x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA"\n }\n }\n ]\n }\n ' peer_did_numalgo_2 = 'did:peer:2' + '.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc' + '.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V' + '.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg' + '.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0' did_doc_numalgo_2_base58 = '\n {\n "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2018",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7"\n },\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",\n "type": "Ed25519VerificationKey2018",\n "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyBase58": "3M5RCDjPTWPkKSN3sxUmmMqHbmRPegYP1tjcKyrDbt9J"\n }\n ],\n "keyAgreement": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",\n "type": "X25519KeyAgreementKey2019",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyBase58": "JhNWeSVLMYccCk7iopQW4guaSJTojqpMEELgSLhKwRr"\n }\n ],\n "service": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",\n "type": "DIDCommMessaging",\n "serviceEndpoint": "https://example.com/endpoint",\n "routingKeys": [\n "did:example:somemediator#somekey"\n ],\n "accept": [\n "didcomm/v2", "didcomm/aip2;env=rfc587"\n ]\n }\n ]\n }\n ' did_doc_numalgo_2_multibase = '\n {\n "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"\n },\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",\n "type": "Ed25519VerificationKey2020",\n "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg"\n }\n ],\n "keyAgreement": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",\n "type": "X25519KeyAgreementKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc"\n }\n ],\n "service": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",\n "type": "DIDCommMessaging",\n "serviceEndpoint": "https://example.com/endpoint",\n "routingKeys": [\n "did:example:somemediator#somekey"\n ],\n "accept": [\n "didcomm/v2", "didcomm/aip2;env=rfc587"\n ]\n }\n ]\n }\n ' did_doc_numalgo_2_jwk = '\n {\n "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "JsonWebKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyJwk": {\n "kty": "OKP",\n "crv": "Ed25519",\n "x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA"\n }\n },\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",\n "type": "JsonWebKey2020",\n "controller": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyJwk": {\n "kty": "OKP",\n "crv": "Ed25519",\n "x": "Itv8B__b1-Jos3LCpUe8EdTFGTCa_Dza6_3848P3R70"\n }\n }\n ],\n "keyAgreement": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",\n "type": "JsonWebKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",\n "publicKeyJwk": {\n "kty": "OKP",\n "crv": "X25519",\n "x": "BIiFcQEn3dfvB2pjlhOQQour6jXy9d5s2FKEJNTOJik"\n }\n }\n ],\n "service": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",\n "type": "DIDCommMessaging",\n "serviceEndpoint": "https://example.com/endpoint",\n "routingKeys": [\n "did:example:somemediator#somekey"\n ],\n "accept": [\n "didcomm/v2", "didcomm/aip2;env=rfc587"\n ]\n }\n ]\n }\n ' 
peer_did_numalgo_2_2_services = 'did:peer:2' + '.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud' + '.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V' + '.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0' did_doc_numalgo_2_multibase_2_services = '\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",\n "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"\n }\n 
],\n "keyAgreement": [\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud",\n "type": "X25519KeyAgreementKey2020",\n "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",\n "publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"\n }\n ],\n "service": [\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#didcommmessaging-0",\n "type": "DIDCommMessaging",\n "serviceEndpoint": "https://example.com/endpoint",\n "routingKeys": [\n "did:example:somemediator#somekey"\n ]\n },\n {\n "id": 
"did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#example-1",\n "type": "example",\n "serviceEndpoint": "https://example.com/endpoint2",\n "routingKeys": [\n "did:example:somemediator#somekey2"\n ],\n "accept": ["didcomm/v2", "didcomm/aip2;env=rfc587"]\n }\n ]\n }\n ' peer_did_numalgo_2_no_services = 'did:peer:2' + '.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud' + '.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V' did_doc_numalgo_2_multibase_no_services = '\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"\n }\n ],\n "keyAgreement": [\n {\n "id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud",\n "type": "X25519KeyAgreementKey2020",\n "controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"\n }\n ]\n }\n ' peer_did_numalgo_2_minimal_services = 
'did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9' did_doc_numalgo_2_multibase_minimal_services = '\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",\n "authentication": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",\n "publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"\n },\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",\n "type": "Ed25519VerificationKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",\n "publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg"\n }\n ],\n "keyAgreement": [\n {\n "id": 
"did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",\n "type": "X25519KeyAgreementKey2020",\n "controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",\n "publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc"\n }\n ],\n "service": [\n {\n "id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#didcommmessaging-0",\n "serviceEndpoint": "https://example.com/endpoint",\n "type": "DIDCommMessaging"\n }\n ]\n }\n '
class Solution:
    def longestSubsequence(self, arr: List[int], diff: int) -> int:
        """Return the length of the longest subsequence of *arr* whose
        consecutive elements differ by exactly *diff*.
        """
        # best[v] = length of the longest valid subsequence ending in value v.
        best = {}
        for value in arr:
            # Extending the best run that ends at `value - diff` consumes
            # that entry (only a later occurrence of this same `value` could
            # ever need it again); max() keeps whichever run is longer.
            extended = best.pop(value - diff, 0) + 1
            best[value] = max(best.get(value, 0), extended)
        return max(best.values())
class Solution:
    def longest_subsequence(self, arr: List[int], diff: int) -> int:
        """Length of the longest subsequence of *arr* in which each element
        exceeds its predecessor by exactly *diff*.
        """
        # Map a value to the longest chain that currently ends on it.
        chain_ending_at = {}
        for item in arr:
            # pop() with a default merges the original if/else: a missing
            # predecessor contributes a run of length 0.
            run = chain_ending_at.pop(item - diff, 0) + 1
            chain_ending_at[item] = max(chain_ending_at.get(item, 0), run)
        return max(chain_ending_at.values())
# Reject the whole batch if it contains any multiple of 8.
numbers = [1, 45, 31, 12, 60]
for number in numbers:
    if number % 8 == 0:
        # Typo fix: the message previously read "THe numbers are unacccepetble".
        print("The numbers are unacceptable")
        break
else:
    # for/else: this branch runs only when the loop finished without a break,
    # i.e. no value was divisible by 8.
    print("The numbers are good")
# Flag the batch as bad as soon as one value is divisible by 8;
# otherwise report it as good (same output as the original for/else loop).
numbers = [1, 45, 31, 12, 60]
if any(value % 8 == 0 for value in numbers):
    print('THe numbers are unacccepetble')
else:
    print('The Numbers are good')
'''
Given a string, determine if it is a palindrome, considering only alphanumeric
characters and ignoring cases.

Note: For the purpose of this problem, we define empty string as valid
palindrome.

Example 1:
Input: "A man, a plan, a canal: Panama"
Output: true

Example 2:
Input: "race a car"
Output: false
'''


class Solution(object):
    def isPalindrome(self, s):
        """
        Return True if *s* reads the same forwards and backwards, looking
        only at alphanumeric characters and ignoring case.

        :type s: str
        :rtype: bool
        """
        # Bug fix: the original indexed the list with Python-2-only
        # xrange(), which raises NameError on Python 3. Comparing the
        # filtered sequence with its reverse removes the index loop.
        cleaned = [c.lower() for c in s if c.isalnum()]
        return cleaned == cleaned[::-1]
""" Given a string, determine if it is a palindrome, considering only
alphanumeric characters and ignoring cases.

Note: For the purpose of this problem, we define empty string as valid
palindrome.

Example 1:
Input: "A man, a plan, a canal: Panama"
Output: true

Example 2:
Input: "race a car"
Output: false
"""


class Solution(object):
    def is_palindrome(self, s):
        """
        Return True if *s* is a palindrome over its alphanumeric
        characters, case-insensitively.

        :type s: str
        :rtype: bool
        """
        # Bug fix: xrange() is Python 2 only and raises NameError on
        # Python 3; a reverse-slice comparison replaces the manual
        # two-pointer index loop.
        cleaned = [c.lower() for c in s if c.isalnum()]
        return cleaned == cleaned[::-1]
class rational:
    """A fraction kept in lowest terms.

    Attribute names are historical and kept for compatibility:
    ``d`` holds the NUMERATOR and ``n`` the DENOMINATOR
    (``__str__`` prints ``d/n``).
    """

    def __init__(self, x, y):
        """Build the reduced fraction x/y.

        Bug fix: the Euclidean loop in ``__gcd`` never runs for a
        negative second operand, so e.g. rational(4, -6) used to reduce
        to the nonsense "1/-2". Normalise the sign onto the numerator
        first and reduce using absolute values.
        """
        if y < 0:
            x, y = -x, -y
        gcd = self.__gcd(abs(x), y)
        self.d = x // gcd  # numerator (historical name, see class docstring)
        self.n = y // gcd  # denominator

    def __gcd(self, x, y):
        # Euclid's algorithm; callers must pass x >= 0 and y >= 0.
        while y > 0:
            x, y = y, x % y
        return x

    def __add__(self, other):
        # a/b + c/d = (a*d + c*b) / (b*d), remembering d=numerator, n=denominator.
        return rational(
            self.n * other.d + self.d * other.n,
            self.n * other.n
        )

    def __mul__(self, other):
        # a/b * c/d = (a*c) / (b*d)
        return rational(
            self.d * other.d,
            self.n * other.n
        )

    @staticmethod
    def add(r1, r2):
        """Functional alias for ``r1 + r2``."""
        return r1 + r2

    @staticmethod
    def mul(r1, r2):
        """Functional alias for ``r1 * r2``."""
        return r1 * r2

    def __str__(self):
        return "{}/{}".format(self.d, self.n)


if __name__ == "__main__":
    r1 = rational(8, 6)
    r2 = rational(5, 2)
    print("r1 =", r1)
    print("r2 =", r2)
    print("r1 + r2 =", r1 + r2)
    print("r1 * r2 =", r1 * r2)
    print("add(r1, r2) =", rational.add(r1, r2))
    print("mul(r1, r2) =", rational.mul(r1, r2))
class Rational:
    """A fraction kept in lowest terms.

    Historical attribute names: ``d`` is the NUMERATOR and ``n`` the
    DENOMINATOR (``__str__`` prints ``d/n``).
    """

    def __init__(self, x, y):
        # Bug fix: the Euclidean loop only terminates for non-negative
        # operands, so Rational(4, -6) reduced to garbage. Push the sign
        # onto the numerator before reducing.
        if y < 0:
            x, y = -x, -y
        gcd = self.__gcd(abs(x), y)
        self.d = x // gcd  # numerator (historical name)
        self.n = y // gcd  # denominator

    def __gcd(self, x, y):
        # Euclid's algorithm; expects x >= 0 and y >= 0.
        while y > 0:
            x, y = y, x % y
        return x

    def __add__(self, other):
        # Bug fix: after the class was renamed, the body still referenced
        # the old lowercase name ``rational``, raising NameError at runtime.
        return Rational(self.n * other.d + self.d * other.n, self.n * other.n)

    def __mul__(self, other):
        return Rational(self.d * other.d, self.n * other.n)

    @staticmethod
    def add(r1, r2):
        """Functional alias for ``r1 + r2``."""
        return r1 + r2

    @staticmethod
    def mul(r1, r2):
        """Functional alias for ``r1 * r2``."""
        return r1 * r2

    def __str__(self):
        return '{}/{}'.format(self.d, self.n)


if __name__ == '__main__':
    r1 = Rational(8, 6)
    r2 = Rational(5, 2)
    print('r1 =', r1)
    print('r2 =', r2)
    print('r1 + r2 =', r1 + r2)
    print('r1 * r2 =', r1 * r2)
    print('add(r1, r2) =', Rational.add(r1, r2))
    print('mul(r1, r2) =', Rational.mul(r1, r2))
# module for data updating
def update_data_set(text_file_name: str, data: {'Time': int, "Mood": int, "Age": int}, class_res: int):
    '''Given a text file name and data, it will open the text file and update
    the file with new data in format "int,int,int".

    Appends one record as a new line: "Time,Mood,Age,class_res".
    '''
    # `with` guarantees the handle is closed even if the write raises
    # (the original open()/close() pair leaked the handle on error).
    with open(text_file_name, "a") as file:
        file.write("\n{},{},{},{}".format(
            data['Time'], data['Mood'], data['Age'], class_res))
def update_data_set(text_file_name: str, data: {'Time': int, 'Mood': int, 'Age': int}, class_res: int):
    """Given a text file name and data, it will open the text file and update
    the file with new data in format "int,int,int".

    Appends a newline followed by "Time,Mood,Age,class_res".
    """
    # Context manager closes the file even on a failed write; the
    # original manual open()/close() leaked the handle on exception.
    with open(text_file_name, 'a') as file:
        file.write('\n{},{},{},{}'.format(
            data['Time'], data['Mood'], data['Age'], class_res))
# https://www.codementor.io/moyosore/a-dive-into-python-closures-and-decorators-part-1-9mpr98pgr


def add_all_arguments(*args):
    """Return the sum of every positional argument (0 when called with none)."""
    return sum(args)


print(add_all_arguments(1, 5, 7, 9, 10))  # 32
print(add_all_arguments(1, 9))  # 10
print(add_all_arguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))  # 55
print(add_all_arguments(1))  # 1
print(add_all_arguments())  # 0


def print_arguments(**kwargs):
    """Echo the keyword-argument dict exactly as received."""
    print(kwargs)


print_arguments(name="Moyosore")  # {'name': 'moyosore'}
print_arguments(name="Moyosore", country="Nigeria")  # {'name': 'moyosore', 'country': 'Nigeria'}
print_arguments()  # {}


def print_argument_values(**kwargs):
    """Print one "key: value" line per keyword argument."""
    for key in kwargs:
        print(f"{key}: {kwargs[key]}")


print_argument_values(name="Moyosore", country="Nigeria")
# name: Moyosore
# country: Nigeria

"""
Args and kwargs can be used together in a function, with args always coming before kwargs. If there are any other required arguments, they come before args and kwargs
"""


def add_and_mul1(*args, **kwargs):
    pass


def add_and_mul2(my_arg, *args, **kwargs):
    pass


def add_and_mul3(my_arg, my_arg_1, *args, **kwargs):
    pass
def add_all_arguments(*args):
    """Return the sum of all positional arguments; 0 for an empty call."""
    return sum(args)


print(add_all_arguments(1, 5, 7, 9, 10))
print(add_all_arguments(1, 9))
print(add_all_arguments(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
print(add_all_arguments(1))
print(add_all_arguments())


def print_arguments(**kwargs):
    """Print the received keyword arguments as a dict."""
    print(kwargs)


print_arguments(name='Moyosore')
print_arguments(name='Moyosore', country='Nigeria')
print_arguments()


def print_argument_values(**kwargs):
    """Print each keyword argument as "key: value"."""
    for key in kwargs:
        print(f'{key}: {kwargs[key]}')


print_argument_values(name='Moyosore', country='Nigeria')

'\nArgs and kwargs can be used together in a function, with args always coming before kwargs. If there are any other required arguments, they come before args and kwargs\n\n'


def add_and_mul1(*args, **kwargs):
    pass


def add_and_mul2(my_arg, *args, **kwargs):
    pass


def add_and_mul3(my_arg, my_arg_1, *args, **kwargs):
    pass
# Depth-first search over rooms; a room's list of keys is the adjacency list.
class Solution(object):
    def canVisitAllRooms(self, rooms):
        """
        :type rooms: List[List[int]]
        :rtype: bool
        """
        visited = set([0])       # rooms we already hold a key for
        stack = [rooms[0]]       # key lists still to be processed
        while stack:
            for key in stack.pop():
                if key not in visited:
                    visited.add(key)
                    stack.append(rooms[key])
        # Every room reachable from room 0 exactly when all keys were found.
        return len(visited) == len(rooms)
class Solution(object):
    def can_visit_all_rooms(self, rooms):
        """
        :type rooms: List[List[int]]
        :rtype: bool
        """
        # Bug fix: the stack was bound to ``s`` but the body referenced
        # ``S`` — NameError at runtime. One consistent name is used now.
        stack = [rooms[0]]
        keys_collected = set([0])
        while stack:
            node = stack.pop()
            for k in node:
                if k not in keys_collected:
                    stack.append(rooms[k])
                    keys_collected.add(k)
            # Early exit once a key for every room has been collected.
            if len(keys_collected) == len(rooms):
                return True
        return False
"""Constants"""
# Core integration identifiers.
DOMAIN = "grocy"
DOMAIN_DATA = "{}_data".format(DOMAIN)
DOMAIN_EVENT = "grocy_updated"
DOMAIN_SERVICE = "{}"  # template: service names are used verbatim

# Integration local data
DATA_GROCY = "grocy"
DATA_DATA = "data"
DATA_ENTITIES = "entities"
DATA_STORE_CONF = "store_conf"

# Domain events
EVENT_ADDED_TO_LIST='added_to_list'
EVENT_SUBTRACT_FROM_LIST='subtract_from_list'
EVENT_PRODUCT_REMOVED='product_removed'
EVENT_PRODUCT_ADDED='product_added'
EVENT_PRODUCT_UPDATED='product_updated'
EVENT_SYNC_DONE='sync_done'
EVENT_GROCY_ERROR='error'

# Configuration (keys used when reading user-provided config/service data)
CONF_APIKEY = "apikey"
CONF_AMOUNT = "amount"
CONF_NAME = "name"
CONF_VALUE = "value"
CONF_SHOPPING_LIST_ID = 'shopping_list'
CONF_PRODUCT_DESCRIPTION = 'product_description'
CONF_PRODUCT_GROUP_ID = 'product_group_id'
CONF_PRODUCT_LOCATION_ID = 'product_location_id'
CONF_STORE = 'store'
CONF_BARCODE = 'barcode'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'

# Defaults (fallbacks when the corresponding CONF_* key is absent)
DEFAULT_AMOUNT = 1
DEFAULT_STORE = ''
DEFAULT_SHOPPING_LIST_ID = 1
DEFAULT_PRODUCT_DESCRIPTION = ""

# Services (names built from the DOMAIN_SERVICE template above)
ADD_TO_LIST_SERVICE = DOMAIN_SERVICE.format('add_to_list')
SUBTRACT_FROM_LIST_SERVICE = DOMAIN_SERVICE.format('subtract_from_list')
ADD_PRODUCT_SERVICE = DOMAIN_SERVICE.format('add_product')
REMOVE_PRODUCT_SERVICE = DOMAIN_SERVICE.format('remove_product')
ADD_FAVORITE_SERVICE = DOMAIN_SERVICE.format('add_favorite')
REMOVE_FAVORITE_SERVICE = DOMAIN_SERVICE.format('remove_favorite')
FILL_CART_SERVICE = DOMAIN_SERVICE.format('fill_cart')
EMPTY_CART_SERVICE = DOMAIN_SERVICE.format('empty_cart')
SYNC_SERVICE = DOMAIN_SERVICE.format('sync')
DEBUG_SERVICE = DOMAIN_SERVICE.format('debug')

# Device classes
STOCK_NAME = "stock"
CHORES_NAME = "chores"
PRODUCTS_NAME = "products"
SHOPPING_LIST_NAME = "shopping_list"
SHOPPING_LISTS_NAME = "shopping_lists"
LOCATIONS_NAME = "locations"
QUANTITY_UNITS_NAME = "quantity_units"
PRODUCT_GROUPS_NAME = "product_groups"
"""Constants"""
# NOTE: this variant of the module uses lowercase names; they are kept
# as-is because other modules import them by these names.
domain = 'grocy'
# Bug fix: the derived values below referenced the old uppercase names
# (DOMAIN, DOMAIN_SERVICE), which do not exist in this module and made
# it raise NameError on import.
domain_data = '{}_data'.format(domain)
domain_event = 'grocy_updated'
domain_service = '{}'  # template: service names are used verbatim

# Integration local data
data_grocy = 'grocy'
data_data = 'data'
data_entities = 'entities'
data_store_conf = 'store_conf'

# Domain events
event_added_to_list = 'added_to_list'
event_subtract_from_list = 'subtract_from_list'
event_product_removed = 'product_removed'
event_product_added = 'product_added'
event_product_updated = 'product_updated'
event_sync_done = 'sync_done'
event_grocy_error = 'error'

# Configuration keys
conf_apikey = 'apikey'
conf_amount = 'amount'
conf_name = 'name'
conf_value = 'value'
conf_shopping_list_id = 'shopping_list'
conf_product_description = 'product_description'
conf_product_group_id = 'product_group_id'
conf_product_location_id = 'product_location_id'
conf_store = 'store'
conf_barcode = 'barcode'
conf_unit_of_measurement = 'unit_of_measurement'

# Defaults
default_amount = 1
default_store = ''
default_shopping_list_id = 1
default_product_description = ''

# Services (built from the domain_service template above)
add_to_list_service = domain_service.format('add_to_list')
subtract_from_list_service = domain_service.format('subtract_from_list')
add_product_service = domain_service.format('add_product')
remove_product_service = domain_service.format('remove_product')
add_favorite_service = domain_service.format('add_favorite')
remove_favorite_service = domain_service.format('remove_favorite')
fill_cart_service = domain_service.format('fill_cart')
empty_cart_service = domain_service.format('empty_cart')
sync_service = domain_service.format('sync')
debug_service = domain_service.format('debug')

# Device classes
stock_name = 'stock'
chores_name = 'chores'
products_name = 'products'
shopping_list_name = 'shopping_list'
shopping_lists_name = 'shopping_lists'
locations_name = 'locations'
quantity_units_name = 'quantity_units'
product_groups_name = 'product_groups'
# Print every Fibonacci number that does not exceed the user-given limit.
number_for_fibonacci = int(input("Enter Fibonacci number: "))
first_num, second_num = 0, 1
while second_num <= number_for_fibonacci:
    print(second_num)
    # Slide the window: each term is the sum of the previous two.
    first_num, second_num = second_num, first_num + second_num
print("Fibonacci number")
# Emit the Fibonacci sequence up to and including the entered limit.
number_for_fibonacci = int(input('Enter Fibonacci number: '))
first_num, second_num = 0, 1
while second_num <= number_for_fibonacci:
    print(second_num)
    # Advance one step in the sequence.
    first_num, second_num = second_num, first_num + second_num
print('Fibonacci number')
def collected_materials(key_materials_dict: dict, junk_materials_dict: dict, material: str, quantity: int):
    """Add *quantity* of *material* to the appropriate tally.

    Key crafting materials (shards/fragments/motes) are added to
    *key_materials_dict*; anything else counts as junk.

    Bug fix: the junk branch previously mutated the global
    ``junk_materials`` instead of the ``junk_materials_dict`` parameter,
    so the function silently only worked when called with that exact
    global dict.
    """
    if material in ("shards", "fragments", "motes"):
        key_materials_dict[material] += quantity
    elif material in junk_materials_dict:
        junk_materials_dict[material] += quantity
    else:
        junk_materials_dict[material] = quantity


key_materials = {"shards": 0, "fragments": 0, "motes": 0}
junk_materials = {}
items_obtained = ""

# Read "<qty> <name>" pairs from stdin until 250 of one key material is banked.
while items_obtained == "":
    current_line = input().split()
    for i in range(0, len(current_line), 2):
        material_quantity = int(current_line[i])
        material_name = current_line[i + 1].lower()
        collected_materials(key_materials, junk_materials, material_name, material_quantity)
        # The first material to reach 250 decides which item is crafted.
        if key_materials['shards'] >= 250:
            items_obtained = "Shadowmourne"
            key_materials["shards"] -= 250
            break
        elif key_materials['fragments'] >= 250:
            items_obtained = "Valanyr"
            key_materials['fragments'] -= 250
            break
        elif key_materials['motes'] >= 250:
            items_obtained = "Dragonwrath"
            key_materials['motes'] -= 250
            break

print(f"{items_obtained} obtained!")
# Key materials: quantity descending, then name ascending.
for (material_name, material_quantity) in sorted(key_materials.items(), key=lambda kvp: (-kvp[1], kvp[0])):
    print(f"{material_name}: {material_quantity}")
# Junk materials: alphabetical by name.
for (junk_materials_name, junk_materials_quantity) in sorted(junk_materials.items(), key=lambda kvp: kvp[0]):
    print(f"{junk_materials_name}: {junk_materials_quantity}")
def collected_materials(key_materials_dict: dict, junk_materials_dict: dict, material: str, quantity: int):
    """Route *quantity* of *material* into the key or junk tally.

    Bug fix: the junk branches previously read and wrote the global
    ``junk_materials`` rather than the ``junk_materials_dict``
    parameter, defeating the point of passing the dict in.
    """
    if material == 'shards' or material == 'fragments' or material == 'motes':
        key_materials_dict[material] += quantity
    elif material not in junk_materials_dict:
        junk_materials_dict[material] = quantity
    else:
        junk_materials_dict[material] += quantity


key_materials = {'shards': 0, 'fragments': 0, 'motes': 0}
junk_materials = {}
items_obtained = ''

# Consume "<qty> <name>" pairs from stdin until one key material hits 250.
while items_obtained == '':
    current_line = input().split()
    for i in range(0, len(current_line), 2):
        material_quantity = int(current_line[i])
        material_name = current_line[i + 1].lower()
        collected_materials(key_materials, junk_materials, material_name, material_quantity)
        # First material to reach 250 determines the crafted item.
        if key_materials['shards'] >= 250:
            items_obtained = 'Shadowmourne'
            key_materials['shards'] -= 250
            break
        elif key_materials['fragments'] >= 250:
            items_obtained = 'Valanyr'
            key_materials['fragments'] -= 250
            break
        elif key_materials['motes'] >= 250:
            items_obtained = 'Dragonwrath'
            key_materials['motes'] -= 250
            break

print(f'{items_obtained} obtained!')
# Key materials: quantity descending, then name ascending.
for (material_name, material_quantity) in sorted(key_materials.items(), key=lambda kvp: (-kvp[1], kvp[0])):
    print(f'{material_name}: {material_quantity}')
# Junk materials: alphabetical by name.
for (junk_materials_name, junk_materials_quantity) in sorted(junk_materials.items(), key=lambda kvp: kvp[0]):
    print(f'{junk_materials_name}: {junk_materials_quantity}')
# Database connection settings (placeholder values — replace before use).
t_host = "localhost"
t_port = "5432"  # default PostgreSQL port
t_dbname = "insert_db_name"
t_user = "insert_user"
t_pw = "insert_pw"

# Sample photo-request fixtures keyed by request code.
photo_request_data = {
    "CAD6E": {
        "id": 218,
        "coords_x": -5.9357153,
        "coords_y": 54.5974748,
        "land_hash": "1530850C9A6F5FF56B66AC16301584EC"
    },
    "TEST1": {
        "id": 1,
        "coords_x": 11.111111,
        "coords_y": 22.222222,
        "land_hash": "11111111111111111111111"
    }
}
# Database connection settings (placeholder values — replace before use).
t_host = 'localhost'
t_port = '5432'  # default PostgreSQL port
t_dbname = 'insert_db_name'
t_user = 'insert_user'
t_pw = 'insert_pw'

# Sample photo-request fixtures keyed by request code.
photo_request_data = {
    'CAD6E': {
        'id': 218,
        'coords_x': -5.9357153,
        'coords_y': 54.5974748,
        'land_hash': '1530850C9A6F5FF56B66AC16301584EC'
    },
    'TEST1': {
        'id': 1,
        'coords_x': 11.111111,
        'coords_y': 22.222222,
        'land_hash': '11111111111111111111111'
    }
}
def pad_in(string: str, space: int) -> str:
    """
    >>> pad_in('abc', 0)
    'abc'
    >>> pad_in('abc', 2)
    '  abc'
    """
    # str * int repetition replaces the roundabout "".join([" "] * space).
    return " " * space + string


def without_ends(string: str) -> str:
    """
    >>> without_ends('abc')
    'b'
    """
    return string[1:-1]


def without_first(string: str) -> str:
    """
    >>> without_first('abc')
    'bc'
    """
    return string[1:]


def without_last(string: str) -> str:
    """
    >>> without_last('abc')
    'ab'
    """
    return string[:-1]


def quote(string: str) -> str:
    """
    Wrap *string* in double quotes unless it is already quoted.

    >>> quote('abc')
    '"abc"'
    >>> quote('"abc"')
    '"abc"'
    """
    return string if string.startswith('"') and string.endswith('"') else f'"{string}"'


def handle(string: str) -> str:
    """
    Return the trailing "user/repo" part of a URL-ish string.

    >>> handle('https://github.com/user/repo')
    'user/repo'
    >>> handle('user/repo')
    'user/repo'
    >>> handle('')
    ''
    """
    splt = string.split("/")
    return "/".join(splt[-2:] if len(splt) >= 2 else splt)


def pluralize(count: int, unit: str) -> str:
    """
    Pluralize a count and given its units.

    >>> pluralize(1, 'file')
    '1 file'
    >>> pluralize(2, 'file')
    '2 files'
    >>> pluralize(0, 'file')
    '0 files'
    """
    return f"{count} {unit}{'s' if count != 1 else ''}"


def remove_prefix(string: str, prefix: str) -> str:
    """
    >>> remove_prefix('abc', 'ab')
    'c'
    >>> remove_prefix('abc', 'd')
    'abc'
    >>> remove_prefix('abc', 'abcd')
    'abc'
    """
    # str.removeprefix needs 3.9+; the slice form keeps older interpreters working.
    return string[len(prefix):] if string.startswith(prefix) else string
def pad_in(string: str, space: int) -> str:
    """
    >>> pad_in('abc', 0)
    'abc'
    >>> pad_in('abc', 2)
    '  abc'
    """
    # String repetition is the idiomatic form of ''.join([' '] * space).
    return ' ' * space + string


def without_ends(string: str) -> str:
    """
    >>> without_ends('abc')
    'b'
    """
    return string[1:-1]


def without_first(string: str) -> str:
    """
    >>> without_first('abc')
    'bc'
    """
    return string[1:]


def without_last(string: str) -> str:
    """
    >>> without_last('abc')
    'ab'
    """
    return string[:-1]


def quote(string: str) -> str:
    """
    Wrap *string* in double quotes unless it already is.

    >>> quote('abc')
    '"abc"'
    >>> quote('"abc"')
    '"abc"'
    """
    return string if string.startswith('"') and string.endswith('"') else f'"{string}"'


def handle(string: str) -> str:
    """
    Return the trailing "user/repo" segment of a URL-ish string.

    >>> handle('https://github.com/user/repo')
    'user/repo'
    >>> handle('user/repo')
    'user/repo'
    >>> handle('')
    ''
    """
    splt = string.split('/')
    return '/'.join(splt[-2:] if len(splt) >= 2 else splt)


def pluralize(count: int, unit: str) -> str:
    """
    Pluralize a count and given its units.

    >>> pluralize(1, 'file')
    '1 file'
    >>> pluralize(2, 'file')
    '2 files'
    >>> pluralize(0, 'file')
    '0 files'
    """
    return f"{count} {unit}{'s' if count != 1 else ''}"


def remove_prefix(string: str, prefix: str) -> str:
    """
    >>> remove_prefix('abc', 'ab')
    'c'
    >>> remove_prefix('abc', 'd')
    'abc'
    >>> remove_prefix('abc', 'abcd')
    'abc'
    """
    # Slice form kept for pre-3.9 compatibility (str.removeprefix is 3.9+).
    return string[len(prefix):] if string.startswith(prefix) else string
def algorithm(K1,K2, B, weight_vec, datapoints, true_labels, lambda_lasso, penalty_func_name='norm1', calculate_score=False): ''' Outer loop is gradient ascent algorithm, the variable 'new_weight_vec' here is the dual variable we are interested in Inner loop is still our Nlasso algorithm :param K1,K2 : the number of iterations :param D: the block incidence matrix :param weight_vec: a list containing the edges's weights of the graph :param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 :param true_labels: a list containing the true labels of the nodes :param samplingset: the sampling set :param lambda_lasso: the parameter lambda :param penalty_func_name: the name of the penalty function used in the algorithm :return new_w: the predicted weigh vectors for each node ''' ''' Sigma: the block diagonal matrix Sigma ''' Sigma = np.diag(np.full(weight_vec.shape, 0.9 / 2)) T_matrix = np.diag(np.array((1.0 / (np.sum(abs(B), 0)))).ravel()) ''' T_matrix: the block diagonal matrix T ''' E, N = B.shape ''' shape of the graph ''' m, n = datapoints[1]['features'].shape ''' shape of the feature vectors of each node in the graph ''' # # define the penalty function # if penalty_func_name == 'norm1': # penalty_func = Norm1Pelanty(lambda_lasso, weight_vec, Sigma, n) # elif penalty_func_name == 'norm2': # penalty_func = Norm2Pelanty(lambda_lasso, weight_vec, Sigma, n) # elif penalty_func_name == 'mocha': # penalty_func = MOCHAPelanty(lambda_lasso, weight_vec, Sigma, n) # else: # raise Exception('Invalid penalty name') new_w = np.array([np.zeros(n) for i in range(N)]) new_u = np.array([np.zeros(n) for i in range(E)]) new_weight_vec = weight_vec # starting algorithm 1 Loss = {} iteration_scores = [] for j in range(K1): new_B = np.dot(np.diag(new_weight_vec),B) T_matrix = np.diag(np.array((1.0 / (np.sum(abs(new_B), 0)))).ravel()) T = np.array((1.0 / (np.sum(abs(new_B), 0)))).ravel() for iterk in range(K2): # if iterk % 100 == 0: # print 
('iter:', iterk) prev_w = np.copy(new_w) # line 2 algorithm 1 hat_w = new_w - np.dot(T_matrix, np.dot(new_B.T, new_u)) for i in range(N): optimizer = datapoints[i]['optimizer'] new_w[i] = optimizer.optimize(datapoints[i]['features'], datapoints[i]['label'], hat_w[i], T[i]) # line 9 algortihm 1 tilde_w = 2 * new_w - prev_w new_u = new_u + np.dot(Sigma, np.dot(new_B, tilde_w)) penalty_func = Norm1Pelanty(lambda_lasso, new_weight_vec, Sigma, n) new_u = penalty_func.update(new_u) new_weight_vec = new_weight_vec +0.1*np.linalg.norm(np.dot(B, new_w),ord=1,axis=1) # # calculate the MSE of the predicted weight vectors # if calculate_score: # Y_pred = [] # for i in range(N): # Y_pred.append(np.dot(datapoints[i]['features'], new_w[i])) # iteration_scores.append(mean_squared_error(true_labels.reshape(N, m), Y_pred)) Loss[j] = total_loss(datapoints,new_w,new_B,new_weight_vec) return new_w, new_weight_vec,Loss,iteration_scores
def algorithm(K1, K2, B, weight_vec, datapoints, true_labels, lambda_lasso, penalty_func_name='norm1', calculate_score=False): """ Outer loop is gradient ascent algorithm, the variable 'new_weight_vec' here is the dual variable we are interested in Inner loop is still our Nlasso algorithm :param K1,K2 : the number of iterations :param D: the block incidence matrix :param weight_vec: a list containing the edges's weights of the graph :param datapoints: a dictionary containing the data of each node in the graph needed for the algorithm 1 :param true_labels: a list containing the true labels of the nodes :param samplingset: the sampling set :param lambda_lasso: the parameter lambda :param penalty_func_name: the name of the penalty function used in the algorithm :return new_w: the predicted weigh vectors for each node """ '\n Sigma: the block diagonal matrix Sigma\n \n \n ' sigma = np.diag(np.full(weight_vec.shape, 0.9 / 2)) t_matrix = np.diag(np.array(1.0 / np.sum(abs(B), 0)).ravel()) '\n T_matrix: the block diagonal matrix T\n ' (e, n) = B.shape '\n shape of the graph\n ' (m, n) = datapoints[1]['features'].shape '\n shape of the feature vectors of each node in the graph\n ' new_w = np.array([np.zeros(n) for i in range(N)]) new_u = np.array([np.zeros(n) for i in range(E)]) new_weight_vec = weight_vec loss = {} iteration_scores = [] for j in range(K1): new_b = np.dot(np.diag(new_weight_vec), B) t_matrix = np.diag(np.array(1.0 / np.sum(abs(new_B), 0)).ravel()) t = np.array(1.0 / np.sum(abs(new_B), 0)).ravel() for iterk in range(K2): prev_w = np.copy(new_w) hat_w = new_w - np.dot(T_matrix, np.dot(new_B.T, new_u)) for i in range(N): optimizer = datapoints[i]['optimizer'] new_w[i] = optimizer.optimize(datapoints[i]['features'], datapoints[i]['label'], hat_w[i], T[i]) tilde_w = 2 * new_w - prev_w new_u = new_u + np.dot(Sigma, np.dot(new_B, tilde_w)) penalty_func = norm1_pelanty(lambda_lasso, new_weight_vec, Sigma, n) new_u = penalty_func.update(new_u) new_weight_vec = 
new_weight_vec + 0.1 * np.linalg.norm(np.dot(B, new_w), ord=1, axis=1) Loss[j] = total_loss(datapoints, new_w, new_B, new_weight_vec) return (new_w, new_weight_vec, Loss, iteration_scores)
#!/usr/bin/env python2 class RDHSSet: def __init__(self, path): with open(path, 'r') as f: self.regions = [ tuple(line.strip().split()[:4]) for line in f ] self.indexmap = { x: i for i, x in enumerate(self.regions) } self.accessionIndexMap = { x[-1]: i for i, x in enumerate(self.regions) } def __len__(self): return len(self.regions) def indexesForChromosome(self, chromosome): return { self.indexmap[x] for x in self.regions if x[0] == chromosome } def indexesForChromosomes(self, chromosomes): r = set() for x in chromosomes: r = r.union(self.indexesForChromosome(x)) return r def accessionsForChromosome(self, chromosome): return { x[-1] for x in self.regions if x[0] == chromosome }
class Rdhsset: def __init__(self, path): with open(path, 'r') as f: self.regions = [tuple(line.strip().split()[:4]) for line in f] self.indexmap = {x: i for (i, x) in enumerate(self.regions)} self.accessionIndexMap = {x[-1]: i for (i, x) in enumerate(self.regions)} def __len__(self): return len(self.regions) def indexes_for_chromosome(self, chromosome): return {self.indexmap[x] for x in self.regions if x[0] == chromosome} def indexes_for_chromosomes(self, chromosomes): r = set() for x in chromosomes: r = r.union(self.indexesForChromosome(x)) return r def accessions_for_chromosome(self, chromosome): return {x[-1] for x in self.regions if x[0] == chromosome}
def getLeapYear(year): while True: if year%400==0: return year elif year%4==0 and year%100!=0: return year else: year +=1 if __name__=='__main__': leap_year_list = [] cur_year = int(input("Enter any year :")) leap_year = getLeapYear(cur_year) for i in range(15): leap_year_list.append(leap_year) leap_year +=4 print(leap_year_list)
def get_leap_year(year): while True: if year % 400 == 0: return year elif year % 4 == 0 and year % 100 != 0: return year else: year += 1 if __name__ == '__main__': leap_year_list = [] cur_year = int(input('Enter any year :')) leap_year = get_leap_year(cur_year) for i in range(15): leap_year_list.append(leap_year) leap_year += 4 print(leap_year_list)
for number in range(1, 101): if number % 3 == 0 and number % 5 == 0: print(f"{number} FizzBuzz") elif number % 3 == 0: print(f"{number} Fizz") elif number % 5 == 0: print(f"{number} Buzz") else: print(number)
for number in range(1, 101): if number % 3 == 0 and number % 5 == 0: print(f'{number} FizzBuzz') elif number % 3 == 0: print(f'{number} Fizz') elif number % 5 == 0: print(f'{number} Buzz') else: print(number)
{{notice}} workers = 4 errorlog = "{{proj_root}}/run/log/gunicorn.error" accesslog = "{{proj_root}}/run/log/gunicorn.access" loglevel = "debug" bind = ["127.0.0.1:9001"]
{{notice}} workers = 4 errorlog = '{{proj_root}}/run/log/gunicorn.error' accesslog = '{{proj_root}}/run/log/gunicorn.access' loglevel = 'debug' bind = ['127.0.0.1:9001']
class Relevance(object): '''Incooperation with result file and qrels file Attributes: qid, int, query id judgement_docid_list: list of list, judged docids in TREC qrels supervised_docid_list: list, top docids from unsupervised models, e.g. BM25, QL. ''' def __init__(self, qid, judged_docid_list, supervised_docid_list, supervised_score_list): self._qid = qid self._judged_docid_list = judged_docid_list self._supervised_docid_list = supervised_docid_list self._supervised_score_list = supervised_score_list def get_qid(self): return self._qid def get_judged_docid_list(self): return self._judged_docid_list def get_supervised_docid_list(self): return self._supervised_docid_list def get_supervised_score_list(self): return self._supervised_score_list
class Relevance(object): """Incooperation with result file and qrels file Attributes: qid, int, query id judgement_docid_list: list of list, judged docids in TREC qrels supervised_docid_list: list, top docids from unsupervised models, e.g. BM25, QL. """ def __init__(self, qid, judged_docid_list, supervised_docid_list, supervised_score_list): self._qid = qid self._judged_docid_list = judged_docid_list self._supervised_docid_list = supervised_docid_list self._supervised_score_list = supervised_score_list def get_qid(self): return self._qid def get_judged_docid_list(self): return self._judged_docid_list def get_supervised_docid_list(self): return self._supervised_docid_list def get_supervised_score_list(self): return self._supervised_score_list
#!/usr/bin/env python # encoding: utf-8 """ @author: Bocheng.Zhang @license: Apache Licence @contact: bocheng0000@gmail.com @file: config.py @time: 2019/11/6 10:35 """ # [environment] ela_url = "coreservices-mainchain-privnet.elastos.org" did_url = "coreservices-didsidechain-privnet.elastos.org" # faucet account faucet = {"address": "EVpUKLScc3BmTyJ9xnd6aGSv3o8eAtFKNw", "publickey": "0359762e6e27e6119cf041ea0cb5046869c78f3a72549f4061d63dc48d91117957", "privatekey": "f384428447fd58fc0525494bd0f6f3df23fcb49098f4b5a50a9e97a71c6d2880"} tx_fee = 100 # sela
""" @author: Bocheng.Zhang @license: Apache Licence @contact: bocheng0000@gmail.com @file: config.py @time: 2019/11/6 10:35 """ ela_url = 'coreservices-mainchain-privnet.elastos.org' did_url = 'coreservices-didsidechain-privnet.elastos.org' faucet = {'address': 'EVpUKLScc3BmTyJ9xnd6aGSv3o8eAtFKNw', 'publickey': '0359762e6e27e6119cf041ea0cb5046869c78f3a72549f4061d63dc48d91117957', 'privatekey': 'f384428447fd58fc0525494bd0f6f3df23fcb49098f4b5a50a9e97a71c6d2880'} tx_fee = 100
def our_range(*args): start = 0 end = 10 step = 1 if len(args) == 1: end = args[0] elif 2 <= len(args) <= 3: start = args[0] end = args[1] if len(args) == 3: step = args[2] elif len(args) > 3: raise SyntaxError i = start while i < end: yield i i += step for j in our_range(5): print(j) print() for j in our_range(2, 6): print(j) print() for j in our_range(3, 30, 9): print(j)
def our_range(*args): start = 0 end = 10 step = 1 if len(args) == 1: end = args[0] elif 2 <= len(args) <= 3: start = args[0] end = args[1] if len(args) == 3: step = args[2] elif len(args) > 3: raise SyntaxError i = start while i < end: yield i i += step for j in our_range(5): print(j) print() for j in our_range(2, 6): print(j) print() for j in our_range(3, 30, 9): print(j)
def part1(input_data): numbers, boards = parse_input(input_data) for number in numbers: # Put number on all boards for board in boards: for row in board: for i in range(len(row)): if number == row[i]: row[i] = "x" # Check for bingo for board in boards: for row in board: if "".join(row) == "xxxxx": return int(number) * unmarked_sum(board) for column in zip(*board): if "".join(column) == "xxxxx": return int(number) * unmarked_sum(board) def part2(input_data): numbers, boards = parse_input(input_data) board_scores = [0 for x in boards] last_index = None for number in numbers: # Put number on all boards for board in boards: for row in board: for i in range(len(row)): if number == row[i]: row[i] = "x" # Check for bingo for i, board in enumerate(boards): for row in board: if "".join(row) == "xxxxx" and board_scores[i] == 0: board_scores[i] = int(number) * unmarked_sum(board) last_index = i for column in zip(*board): if "".join(column) == "xxxxx" and board_scores[i] == 0: board_scores[i] = int(number) * unmarked_sum(board) last_index = i return board_scores[last_index] def unmarked_sum(board): return sum( map( lambda x: sum(map(lambda y: int(y) if y != "x" else 0, x)), board, ) ) def parse_input(input_data): numbers = input_data[0].split(",") boards = [] for i in range(1, len(input_data), 6): rows = input_data[i + 1 : i + 6] rows = list( map(lambda x: list(filter(lambda y: len(y) > 0, x.split(" "))), rows) ) boards.append(rows) return numbers, boards if __name__ == "__main__": with open("input", "r") as input_file: input_data = list(map(lambda x: x.strip(), input_file.readlines())) print(part1(input_data)) print(part2(input_data))
def part1(input_data): (numbers, boards) = parse_input(input_data) for number in numbers: for board in boards: for row in board: for i in range(len(row)): if number == row[i]: row[i] = 'x' for board in boards: for row in board: if ''.join(row) == 'xxxxx': return int(number) * unmarked_sum(board) for column in zip(*board): if ''.join(column) == 'xxxxx': return int(number) * unmarked_sum(board) def part2(input_data): (numbers, boards) = parse_input(input_data) board_scores = [0 for x in boards] last_index = None for number in numbers: for board in boards: for row in board: for i in range(len(row)): if number == row[i]: row[i] = 'x' for (i, board) in enumerate(boards): for row in board: if ''.join(row) == 'xxxxx' and board_scores[i] == 0: board_scores[i] = int(number) * unmarked_sum(board) last_index = i for column in zip(*board): if ''.join(column) == 'xxxxx' and board_scores[i] == 0: board_scores[i] = int(number) * unmarked_sum(board) last_index = i return board_scores[last_index] def unmarked_sum(board): return sum(map(lambda x: sum(map(lambda y: int(y) if y != 'x' else 0, x)), board)) def parse_input(input_data): numbers = input_data[0].split(',') boards = [] for i in range(1, len(input_data), 6): rows = input_data[i + 1:i + 6] rows = list(map(lambda x: list(filter(lambda y: len(y) > 0, x.split(' '))), rows)) boards.append(rows) return (numbers, boards) if __name__ == '__main__': with open('input', 'r') as input_file: input_data = list(map(lambda x: x.strip(), input_file.readlines())) print(part1(input_data)) print(part2(input_data))
load("@bazel_skylib//lib:paths.bzl", "paths") load( "//caffe2/test:defs.bzl", "define_tests", ) def define_pipeline_tests(): test_files = native.glob(["**/test_*.py"]) TESTS = {} for test_file in test_files: test_file_name = paths.basename(test_file) test_name = test_file_name.replace("test_", "").replace(".py", "") TESTS[test_name] = [test_file] define_tests( pytest = True, tests = TESTS, external_deps = [("pytest", None)], resources = ["conftest.py"], )
load('@bazel_skylib//lib:paths.bzl', 'paths') load('//caffe2/test:defs.bzl', 'define_tests') def define_pipeline_tests(): test_files = native.glob(['**/test_*.py']) tests = {} for test_file in test_files: test_file_name = paths.basename(test_file) test_name = test_file_name.replace('test_', '').replace('.py', '') TESTS[test_name] = [test_file] define_tests(pytest=True, tests=TESTS, external_deps=[('pytest', None)], resources=['conftest.py'])
# definitions.py IbConfig = { 'telepotChatId': '572145851', 'googleServiceKeyFile' : 'GoogleCloudServiceKey.json', 'telepotToken' : '679545486:AAEbCBdedlJ1lxFXpN1a-J-6LfgFQ4cAp04', 'startMessage' : 'Hallo, ich bin unsere dolmetschende Eule. Stelle mich bitte auf deine Handflaeche und ich sage dir, was dein tauber Gegenueber schreibt. Wenn du etwas sagst, teile ich das deinem Gegenueber mit', 'listenLanguage' : 'de_DE', # de_DE, en_US, en_UK, en_AU, etc. See https://cloud.google.com/speech-to-text/docs/languages 'speakLanguage' : 'de', # de, en_US, en_UK, en_AU. See gtts-cli --all 'micDeviceIndex' : 0, }
ib_config = {'telepotChatId': '572145851', 'googleServiceKeyFile': 'GoogleCloudServiceKey.json', 'telepotToken': '679545486:AAEbCBdedlJ1lxFXpN1a-J-6LfgFQ4cAp04', 'startMessage': 'Hallo, ich bin unsere dolmetschende Eule. Stelle mich bitte auf deine Handflaeche und ich sage dir, was dein tauber Gegenueber schreibt. Wenn du etwas sagst, teile ich das deinem Gegenueber mit', 'listenLanguage': 'de_DE', 'speakLanguage': 'de', 'micDeviceIndex': 0}
class Graph: def __init__(self,nodes,edges): self.graph=[] for i in range(nodes+1): self.graph.append([]) self.visited=[False]*(len(self.graph)) def addEdge(self,v1,v2): self.graph[v1].append(v2) def dfs(self,source): self.visited[source]=True print(source) for i in self.graph[source]: if not self.visited[i]: self.dfs(i) def dfs_stack(self,source): stack=[] stack.append(source) self.visited[source] = True while stack: top=stack.pop() print(top) for i in self.graph[top]: if not self.visited[i]: stack.append(i) self.visited[i]=True g=Graph(4,5) g.addEdge(1,2) g.addEdge(1,3) g.addEdge(1,4) g.addEdge(3,4) g.addEdge(4,2) g.dfs_stack(1)
class Graph: def __init__(self, nodes, edges): self.graph = [] for i in range(nodes + 1): self.graph.append([]) self.visited = [False] * len(self.graph) def add_edge(self, v1, v2): self.graph[v1].append(v2) def dfs(self, source): self.visited[source] = True print(source) for i in self.graph[source]: if not self.visited[i]: self.dfs(i) def dfs_stack(self, source): stack = [] stack.append(source) self.visited[source] = True while stack: top = stack.pop() print(top) for i in self.graph[top]: if not self.visited[i]: stack.append(i) self.visited[i] = True g = graph(4, 5) g.addEdge(1, 2) g.addEdge(1, 3) g.addEdge(1, 4) g.addEdge(3, 4) g.addEdge(4, 2) g.dfs_stack(1)
inp = list(map(float, input().split())) inp.sort(reverse=True) a, b, c = inp if a >= b + c: print('NAO FORMA TRIANGULO') else: if a ** 2 == b ** 2 + c ** 2: print('TRIANGULO RETANGULO') elif a ** 2 > b ** 2 + c ** 2: print('TRIANGULO OBTUSANGULO') else: print('TRIANGULO ACUTANGULO') if a == b and b == c: print('TRIANGULO EQUILATERO') elif a == b or a == c or b == c: print('TRIANGULO ISOSCELES')
inp = list(map(float, input().split())) inp.sort(reverse=True) (a, b, c) = inp if a >= b + c: print('NAO FORMA TRIANGULO') else: if a ** 2 == b ** 2 + c ** 2: print('TRIANGULO RETANGULO') elif a ** 2 > b ** 2 + c ** 2: print('TRIANGULO OBTUSANGULO') else: print('TRIANGULO ACUTANGULO') if a == b and b == c: print('TRIANGULO EQUILATERO') elif a == b or a == c or b == c: print('TRIANGULO ISOSCELES')
__all__ = ['CONFIG', 'get'] CONFIG = { 'model_save_dir': "./output/MicroExpression", 'num_classes': 7, 'total_images': 17245, 'epochs': 20, 'batch_size': 32, 'image_shape': [3, 224, 224], 'LEARNING_RATE': { 'params': { 'lr': 0.00375 } }, 'OPTIMIZER': { 'params': { 'momentum': 0.9 }, 'regularizer': { 'function': 'L2', 'factor': 0.000001 } }, 'LABEL_MAP': [ "disgust", "others", "sadness", "happiness", "surprise", "repression", "fear" ] } def get(full_path): for id, name in enumerate(full_path.split('.')): if id == 0: config = CONFIG config = config[name] return config
__all__ = ['CONFIG', 'get'] config = {'model_save_dir': './output/MicroExpression', 'num_classes': 7, 'total_images': 17245, 'epochs': 20, 'batch_size': 32, 'image_shape': [3, 224, 224], 'LEARNING_RATE': {'params': {'lr': 0.00375}}, 'OPTIMIZER': {'params': {'momentum': 0.9}, 'regularizer': {'function': 'L2', 'factor': 1e-06}}, 'LABEL_MAP': ['disgust', 'others', 'sadness', 'happiness', 'surprise', 'repression', 'fear']} def get(full_path): for (id, name) in enumerate(full_path.split('.')): if id == 0: config = CONFIG config = config[name] return config
n=str(input("Enter the string")) le=len(n) dig=0 alp=0 for i in range(le): if n[i].isdigit(): dig=dig+1 elif n[i].isalpha(): alp=alp+1 else: continue print("Letters count is : %d"%alp) print("Digits count is : %d"%dig)
n = str(input('Enter the string')) le = len(n) dig = 0 alp = 0 for i in range(le): if n[i].isdigit(): dig = dig + 1 elif n[i].isalpha(): alp = alp + 1 else: continue print('Letters count is : %d' % alp) print('Digits count is : %d' % dig)
# Author: Will Killian # https://www.github.com/willkill07 # # Copyright 2021 # All Rights Reserved class Lab: _all = dict() lab_id = 200 def min_id(): """ Returns the maximum number for the lab IDs (always 200) """ return 200 def max_id(): """ Returns the maximum number for the lab IDs """ return Lab.lab_id - 1 def get(id): """ Given an ID of a lab, return the instance """ return Lab._all[id] def __init__(self, name: str): # update id to be a unique identifier self.id = Lab.lab_id Lab.lab_id += 1 self.name = name Lab._all[self.id] = self def __repr__(self): """ Pretty Print representation of a course is its subject, number, and section """ return f'{self.name}'
class Lab: _all = dict() lab_id = 200 def min_id(): """ Returns the maximum number for the lab IDs (always 200) """ return 200 def max_id(): """ Returns the maximum number for the lab IDs """ return Lab.lab_id - 1 def get(id): """ Given an ID of a lab, return the instance """ return Lab._all[id] def __init__(self, name: str): self.id = Lab.lab_id Lab.lab_id += 1 self.name = name Lab._all[self.id] = self def __repr__(self): """ Pretty Print representation of a course is its subject, number, and section """ return f'{self.name}'
# -*- coding: utf-8 -*- class GeocoderException(Exception): """Base class for all the reverse geocoder module exceptions.""" class InitializationError(GeocoderException): """Catching this error will catch all initialization-related errors.""" class CsvReadError(InitializationError): """Could not open the locations csv file.""" class CsvParseError(InitializationError): """Could not parse the locations csv file."""
class Geocoderexception(Exception): """Base class for all the reverse geocoder module exceptions.""" class Initializationerror(GeocoderException): """Catching this error will catch all initialization-related errors.""" class Csvreaderror(InitializationError): """Could not open the locations csv file.""" class Csvparseerror(InitializationError): """Could not parse the locations csv file."""
#Given a decimal number n, your task is to convert it #to its binary equivalent using a recursive function. #The binary number output must be of length 8 bits. def binary(n): if n == 0: return 0 else: return (n % 2 + 10*binary(int(n // 2))) def convert_8_bit(n): s = str(n) while len(s)<8: s = '0' + s return s def dec_to_binary(n): return convert_8_bit(binary(n)) def main(): T = int(input()) n = [] for _ in range(0,T): temp = int(input()) n.append(dec_to_binary(temp)) print(*n,sep="\n") if __name__=='__main__': try: main() except: pass
def binary(n): if n == 0: return 0 else: return n % 2 + 10 * binary(int(n // 2)) def convert_8_bit(n): s = str(n) while len(s) < 8: s = '0' + s return s def dec_to_binary(n): return convert_8_bit(binary(n)) def main(): t = int(input()) n = [] for _ in range(0, T): temp = int(input()) n.append(dec_to_binary(temp)) print(*n, sep='\n') if __name__ == '__main__': try: main() except: pass
''' Author: Shuailin Chen Created Date: 2021-09-14 Last Modified: 2021-12-29 content: ''' # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) optimizer_config = dict() # learning policy lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) # runtime settings runner = dict(type='MyIterBasedRunner', max_iters=10000) # runner = dict(type='IterBasedRunner', max_iters=20000) checkpoint_config = dict(by_epoch=False, interval=100000) # evaluation = dict(interval=50, metric='mIoU', pre_eval=True) evaluation = dict(interval=1000, metric='mIoU', pre_eval=True)
""" Author: Shuailin Chen Created Date: 2021-09-14 Last Modified: 2021-12-29 content: """ optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) optimizer_config = dict() lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False) runner = dict(type='MyIterBasedRunner', max_iters=10000) checkpoint_config = dict(by_epoch=False, interval=100000) evaluation = dict(interval=1000, metric='mIoU', pre_eval=True)
keys = { "accept": 30, "add": 107, "apps": 93, "attn": 246, "back": 8, "browser_back": 166, "browser_forward": 167, "cancel": 3, "capital": 20, "clear": 12, "control": 17, "convert": 28, "crsel": 247, "decimal": 110, "delete": 46, "divide": 111, "down": 40, "end": 35, "ereof": 249, "escape": 27, "execute": 43, "exsel": 248, "f1": 112, "f10": 121, "f11": 122, "f12": 123, "f13": 124, "f14": 125, "f15": 126, "f16": 127, "f17": 128, "f18": 129, "f19": 130, "f2": 113, "f20": 131, "f21": 132, "f22": 133, "f23": 134, "f24": 135, "f3": 114, "f4": 115, "f5": 116, "f6": 117, "f7": 118, "f8": 119, "f9": 120, "final": 24, "hangeul": 21, "hangul": 21, "hanja": 25, "help": 47, "home": 36, "insert": 45, "junja": 23, "kana": 21, "kanji": 25, "lbutton": 1, "lcontrol": 162, "left": 37, "lmenu": 164, "lshift": 160, "lwin": 91, "mbutton": 4, "media_next_track": 176, "media_play_pause": 179, "media_prev_track": 177, "menu": 18, "modechange": 31, "multiply": 106, "next": 34, "noname": 252, "nonconvert": 29, "numlock": 144, "numpad0": 96, "numpad1": 97, "numpad2": 98, "numpad3": 99, "numpad4": 100, "numpad5": 101, "numpad6": 102, "numpad7": 103, "numpad8": 104, "numpad9": 105, "oem_clear": 254, "pa1": 253, "pagedown": 34, "pageup": 33, "pause": 19, "play": 250, "print": 42, "prior": 33, "processkey": 229, "rbutton": 2, "rcontrol": 163, "return": 13, "right": 39, "rmenu": 165, "rshift": 161, "rwin": 92, "scroll": 145, "select": 41, "separator": 108, "shift": 16, "snapshot": 44, "space": 32, "subtract": 109, "tab": 9, "up": 38, "volume_down": 174, "volume_mute": 173, "volume_up": 175, "xbutton1": 5, "xbutton2": 6, "zoom": 251, "/": 191, ";": 218, "[": 219, "\\": 220, "]": 221, "'": 222, "=": 187, "-": 189, ";": 186, } modifiers = {"alt": 1, "control": 2, "shift": 4, "win": 8}
keys = {'accept': 30, 'add': 107, 'apps': 93, 'attn': 246, 'back': 8, 'browser_back': 166, 'browser_forward': 167, 'cancel': 3, 'capital': 20, 'clear': 12, 'control': 17, 'convert': 28, 'crsel': 247, 'decimal': 110, 'delete': 46, 'divide': 111, 'down': 40, 'end': 35, 'ereof': 249, 'escape': 27, 'execute': 43, 'exsel': 248, 'f1': 112, 'f10': 121, 'f11': 122, 'f12': 123, 'f13': 124, 'f14': 125, 'f15': 126, 'f16': 127, 'f17': 128, 'f18': 129, 'f19': 130, 'f2': 113, 'f20': 131, 'f21': 132, 'f22': 133, 'f23': 134, 'f24': 135, 'f3': 114, 'f4': 115, 'f5': 116, 'f6': 117, 'f7': 118, 'f8': 119, 'f9': 120, 'final': 24, 'hangeul': 21, 'hangul': 21, 'hanja': 25, 'help': 47, 'home': 36, 'insert': 45, 'junja': 23, 'kana': 21, 'kanji': 25, 'lbutton': 1, 'lcontrol': 162, 'left': 37, 'lmenu': 164, 'lshift': 160, 'lwin': 91, 'mbutton': 4, 'media_next_track': 176, 'media_play_pause': 179, 'media_prev_track': 177, 'menu': 18, 'modechange': 31, 'multiply': 106, 'next': 34, 'noname': 252, 'nonconvert': 29, 'numlock': 144, 'numpad0': 96, 'numpad1': 97, 'numpad2': 98, 'numpad3': 99, 'numpad4': 100, 'numpad5': 101, 'numpad6': 102, 'numpad7': 103, 'numpad8': 104, 'numpad9': 105, 'oem_clear': 254, 'pa1': 253, 'pagedown': 34, 'pageup': 33, 'pause': 19, 'play': 250, 'print': 42, 'prior': 33, 'processkey': 229, 'rbutton': 2, 'rcontrol': 163, 'return': 13, 'right': 39, 'rmenu': 165, 'rshift': 161, 'rwin': 92, 'scroll': 145, 'select': 41, 'separator': 108, 'shift': 16, 'snapshot': 44, 'space': 32, 'subtract': 109, 'tab': 9, 'up': 38, 'volume_down': 174, 'volume_mute': 173, 'volume_up': 175, 'xbutton1': 5, 'xbutton2': 6, 'zoom': 251, '/': 191, ';': 218, '[': 219, '\\': 220, ']': 221, "'": 222, '=': 187, '-': 189, ';': 186} modifiers = {'alt': 1, 'control': 2, 'shift': 4, 'win': 8}
N = int(input()) A = list(map(int, input().split())) A.sort() left, right = 0, 2*N-1 moist = A.count(0) // 2 while left < right and A[left] < 0 and A[right] > 0: if abs(A[left]) == A[right]: moist += 1 left += 1 right -= 1 elif abs(A[left]) > A[right]: left += 1 else: right -= 1 left, right = 0, 2*N-1 wet = 0 while left < right: if A[left] + A[right] <= 0: left += 1 else: wet += 1 left += 1 right -= 1 left, right = 0, 2*N-1 dry = 0 while left < right: if A[left] + A[right] >= 0: right -= 1 else: dry += 1 left += 1 right -= 1 print(dry, wet, moist)
n = int(input()) a = list(map(int, input().split())) A.sort() (left, right) = (0, 2 * N - 1) moist = A.count(0) // 2 while left < right and A[left] < 0 and (A[right] > 0): if abs(A[left]) == A[right]: moist += 1 left += 1 right -= 1 elif abs(A[left]) > A[right]: left += 1 else: right -= 1 (left, right) = (0, 2 * N - 1) wet = 0 while left < right: if A[left] + A[right] <= 0: left += 1 else: wet += 1 left += 1 right -= 1 (left, right) = (0, 2 * N - 1) dry = 0 while left < right: if A[left] + A[right] >= 0: right -= 1 else: dry += 1 left += 1 right -= 1 print(dry, wet, moist)
def printSlope(equation): xIndex = equation.find('x') if xIndex == -1: print(0) elif xIndex == 2: print(1) else: slopeString = equation[2:xIndex] if slopeString == "-": print(-1) else: print(slopeString) printSlope("y=2x+5") #prints 2 printSlope("y=-1299x+5") #prints -1299 printSlope("y=x+5") #prints 1 printSlope("y=-x+5") #prints -1 printSlope("y=5") #prints 0
def print_slope(equation): x_index = equation.find('x') if xIndex == -1: print(0) elif xIndex == 2: print(1) else: slope_string = equation[2:xIndex] if slopeString == '-': print(-1) else: print(slopeString) print_slope('y=2x+5') print_slope('y=-1299x+5') print_slope('y=x+5') print_slope('y=-x+5') print_slope('y=5')
#!/usr/bin/env python #-*- coding: utf-8 -*- """ __title__ = 'replace' __author__ = 'JieYuan' __mtime__ = '2019-05-06' """ def replace(s, dic): return s.translate(str.maketrans(dic)) if __name__ == '__main__': print(replace('abcd', {'a': '8', 'd': '88'}))
""" __title__ = 'replace' __author__ = 'JieYuan' __mtime__ = '2019-05-06' """ def replace(s, dic): return s.translate(str.maketrans(dic)) if __name__ == '__main__': print(replace('abcd', {'a': '8', 'd': '88'}))
def profile_most_probable_kmer(text, k, profile): highest_prob = -1 most_probable_kmer = "" for i in range(len(text) - k + 1): kmer = text[i : i+k] prob = 1 for index, nucleotide in enumerate(kmer): prob *= profile[nucleotide][index] if prob > highest_prob: highest_prob = prob most_probable_kmer = kmer return most_probable_kmer if __name__ == '__main__': with open("dataset_159_3.txt") as f: text = f.readline().strip() k = int(f.readline().strip()) profile = {} for letter in ['A', 'C', 'G', 'T']: profile[letter] = [float(item) for item in f.readline().strip().split()] print(profile_most_probable_kmer(text, k, profile))
def profile_most_probable_kmer(text, k, profile):
    """Find the most probable k-mer in *text* according to the position
    probability matrix *profile* (first k-mer wins on ties)."""

    def score(kmer):
        # Product of per-position probabilities for this k-mer.
        p = 1
        for i in range(k):
            p *= profile[kmer[i]][i]
        return p

    best = ('', -1)
    for i in range(len(text) - k + 1):
        kmer = text[i:i + k]
        p = score(kmer)
        if p > best[1]:
            best = (kmer, p)
    return best[0]


if __name__ == '__main__':
    with open('dataset_159_3.txt') as f:
        text = f.readline().strip()
        k = int(f.readline().strip())
        profile = {}
        for letter in ['A', 'C', 'G', 'T']:
            profile[letter] = [float(item) for item in f.readline().strip().split()]
        print(profile_most_probable_kmer(text, k, profile))
ROW_COUNT = 128
COLUMN_COUNT = 8


def solve(input):
    """Return the highest seat id among the given boarding-pass strings."""
    return max(BoardingPass(code).seat_id for code in input)


class BoardingPass:
    """Decodes a binary-space-partitioned boarding pass (FB x7, LR x3)."""

    def __init__(self, data, row_count=ROW_COUNT, column_count=COLUMN_COUNT):
        self.row = self._search(data=data[:7], low_char='F', high_char='B',
                                i=0, min_value=0, max_value=row_count)
        self.column = self._search(data=data[7:], low_char='L', high_char='R',
                                   i=0, min_value=0, max_value=column_count)
        self.seat_id = Seat(self.row, self.column).id

    def _search(self, data, low_char, high_char, i, min_value, max_value):
        """Iteratively halve [min_value, max_value) following the characters of *data*."""
        while True:
            if i == len(data) - 1:
                assert max_value - min_value == 2
                if data[i] == low_char:
                    return min_value
                if data[i] == high_char:
                    return max_value - 1
                raise Exception(f'Expected {low_char} or {high_char} got {data[i]}')
            mid_value = (max_value - min_value) // 2 + min_value
            if data[i] == low_char:
                max_value = mid_value
            elif data[i] == high_char:
                min_value = mid_value
            else:
                raise Exception(f'Expected {low_char} or {high_char} got {data[i]}')
            i += 1


class Seat:
    def __init__(self, row, column):
        # Seat id per AoC 2020 day 5: row * 8 + column.
        self.id = row * 8 + column
# Fixes: module constants were defined lowercase but referenced as
# ROW_COUNT/COLUMN_COUNT; `boarding_pass`, `seat` and `exception` were
# undefined names (the class is Boardingpass, the helper is Seat, and the
# builtin is Exception) -> every call raised NameError.
ROW_COUNT = 128
COLUMN_COUNT = 8


def solve(input):
    """Return the maximum seat id over all boarding-pass strings in *input*."""
    return max(map(lambda i: Boardingpass(i).seat_id, input))


class Boardingpass:
    """Decode a 10-character boarding pass (FB x7, LR x3) into row/column/seat id."""

    def __init__(self, data, row_count=ROW_COUNT, column_count=COLUMN_COUNT):
        self.row = self._search(data=data[:7], low_char='F', high_char='B',
                                i=0, min_value=0, max_value=row_count)
        self.column = self._search(data=data[7:], low_char='L', high_char='R',
                                   i=0, min_value=0, max_value=column_count)
        self.seat_id = Seat(self.row, self.column).id

    def _search(self, data, low_char, high_char, i, min_value, max_value):
        """Recursive binary partition of [min_value, max_value) driven by *data*."""
        if i == len(data) - 1:
            assert max_value - min_value == 2
            if data[i] == low_char:
                return min_value
            elif data[i] == high_char:
                return max_value - 1
            else:
                raise Exception(f'Expected {low_char} or {high_char} got {data[i]}')
        mid_value = int((max_value - min_value) / 2) + min_value
        if data[i] == low_char:
            return self._search(data, low_char, high_char, i + 1, min_value, mid_value)
        elif data[i] == high_char:
            return self._search(data, low_char, high_char, i + 1, mid_value, max_value)
        else:
            raise Exception(f'Expected {low_char} or {high_char} got {data[i]}')


class Seat:
    def __init__(self, row, column):
        # Seat id per AoC 2020 day 5: row * 8 + column.
        self.id = row * 8 + column
f = open('day3.txt', 'r')
data = f.readlines()

# Tree counters for the slopes (right, down): (1,1), (3,1), (5,1), (7,1).
hits = {1: 0, 3: 0, 5: 0, 7: 0}
# Slope (1, 2): advances one column every second row.
even_hits = 0

for row, raw in enumerate(data):
    line = raw.replace("\n", '')
    width = len(line)
    for step in hits:
        if line[(row * step) % width] == '#':
            hits[step] += 1
    if row % 2 == 0 and line[(row // 2) % width] == '#':
        even_hits += 1

print(str(even_hits * hits[1] * hits[3] * hits[5] * hits[7]))
# AoC 2020 day 3: multiply tree counts on slopes (1,1), (3,1), (5,1), (7,1), (1,2).
# Fix: the loop referenced posEven and the final print referenced evenCount /
# oneCount / ... which were never defined after the snake_case rename -> NameError.
f = open('day3.txt', 'r')
data = f.readlines()

one_count = 0
three_count = 0
five_count = 0
seven_count = 0
even_count = 0

pos1 = 0
pos3 = 0
pos5 = 0
pos7 = 0
pos_even = 0
even = True

for line in data:
    line = line.replace('\n', '')
    if line[pos1 % len(line)] == '#':
        one_count += 1
    if line[pos3 % len(line)] == '#':
        three_count += 1
    if line[pos5 % len(line)] == '#':
        five_count += 1
    if line[pos7 % len(line)] == '#':
        seven_count += 1
    if line[pos_even % len(line)] == '#' and even:
        even_count += 1
    pos1 += 1
    pos3 += 3
    pos5 += 5
    pos7 += 7
    # The (1, 2) slope only advances its column after every second row.
    if not even:
        pos_even += 1
    even = not even

print(str(even_count * one_count * three_count * five_count * seven_count))
def porrada(seq, passo):
    """Josephus-style elimination over *seq* stepping *passo*; returns the
    1-based number of the survivor."""
    cursor = 0
    while len(seq) > 1:
        cursor = cursor + passo - 1
        # Wrap the cursor back into range (equivalent to a modulo).
        while cursor > len(seq) - 1:
            cursor -= len(seq)
        seq.pop(cursor)
    return seq[0] + 1


quantidade_de_casos = int(input())
for caso in range(quantidade_de_casos):
    n_str, m_str = input().split()
    participantes = list(range(int(n_str)))
    print(f'Case {caso + 1}: {porrada(participantes, int(m_str))}')
def porrada(seq, passo):
    """Simulate the circular elimination game and return the winner's
    1-based number (classic Josephus problem)."""
    atual = 0
    while len(seq) > 1:
        # Advance passo-1 places, wrapping around the shrinking circle.
        atual = (atual + passo - 1) % len(seq)
        seq.pop(atual)
    return seq[0] + 1


quantidade_de_casos = int(input())
for c in range(quantidade_de_casos):
    n, m = input().split()
    print(f'Case {c + 1}: {porrada(list(range(int(n))), int(m))}')
# Read n numbers, sort descending, and sum each value weighted by 2**position.
n = int(input())
arr = sorted(map(int, input().split(' ')), reverse=True)
print(sum(v << i for i, v in enumerate(arr)))
# Read n numbers and sum them descending, doubling the weight at each step.
n = int(input())
values = [int(tok) for tok in input().split(' ')]
values.sort(reverse=True)
total = 0
weight = 1
for v in values:
    total += v * weight
    weight *= 2
print(total)
class Solution:
    def minInsertions(self, s: str) -> int:
        """Minimum insertions so every '(' is matched by exactly '))' (LC 1541)."""
        need = 0    # closing parens still required
        added = 0   # insertions performed so far
        for ch in s:
            if ch == '(':
                if need % 2 == 1:
                    # Close the dangling half-pair before opening a new group.
                    need -= 1
                    added += 1
                need += 2
            else:
                need -= 1
                if need < 0:
                    # ')' with no opener: insert '(' and still owe one ')'.
                    need = 1
                    added += 1
        return need + added
class Solution:
    def min_insertions(self, s: str) -> int:
        """Count insertions needed so each '(' is matched by '))' (LeetCode 1541)."""
        pending, inserted = 0, 0
        for symbol in s:
            if symbol == ')':
                pending -= 1
                if pending < 0:
                    # Unmatched ')': insert a '(' and keep owing one ')'.
                    pending = 1
                    inserted += 1
                continue
            if pending & 1:
                # An odd debt means a half-closed pair; finish it first.
                pending -= 1
                inserted += 1
            pending += 2
        return pending + inserted
# Given an array of integers, return indices of the two numbers such that they
# add up to a specific target. You may assume that each input would have
# exactly one solution, and you may not use the same element twice.
#
# Example: nums = [2, 7, 11, 15], target = 9
# -> nums[0] + nums[1] == 9, so return [0, 1].


class Solution(object):
    def twoSum(self, nums, target):
        """Single-pass complement lookup. O(n) time, O(n) space.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        needed = {}  # complement value -> index that is waiting for it
        for idx, value in enumerate(nums):
            if value in needed:
                return [needed[value], idx]
            needed[target - value] = idx
        return []
class Solution(object):
    def two_sum(self, nums, target):
        """One-pass hash lookup: O(n) time, O(n) space.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        seen = {}  # maps the complement still required -> index needing it
        idx = 0
        while idx < len(nums):
            current = nums[idx]
            if current in seen:
                return [seen[current], idx]
            seen[target - current] = idx
            idx += 1
        return []
# For each position, sum the longer of the '<'-run ending there and the
# '>'-run starting there.
S = input()
N = len(S)

# A[i]: length of the maximal run of '<' ending just before position i.
A = [0] * (N + 1)
for i in range(N):
    A[i + 1] = A[i] + 1 if S[i] == '<' else 0

# B[i]: length of the maximal run of '>' starting at position i.
B = [0] * (N + 1)
for i in range(N - 1, -1, -1):
    B[i] = B[i + 1] + 1 if S[i] == '>' else 0

print(sum(max(a, b) for a, b in zip(A, B)))
# Sum, per position, the larger of the '<'-run ending there and the '>'-run
# starting there. Fix: the original assigned lowercase s/n/a/b but every use
# referenced uppercase S/N/A/B -> NameError on the first line after input().
S = input()
N = len(S)
A = [0] * (N + 1)
B = [0] * (N + 1)
for i in range(N):
    if S[i] == '<':
        A[i + 1] = A[i] + 1
    else:
        A[i + 1] = 0
for i in range(N):
    if S[N - i - 1] == '>':
        B[N - i - 1] = B[N - i] + 1
    else:
        B[N - i - 1] = 0
ans = 0
for i in range(N + 1):
    ans += max(A[i], B[i])
print(ans)
'''
Write a program that takes an integer argument and retums all the primes
between 1 and that integer. For example, if the input is 18, you should
return [2, 3, 5, 7, 11, 13, 17].
'''


def generate_primes(n):
    """Sieve of Eratosthenes. Time O(n log log n), space O(n)."""
    is_prime = [False, False] + [True] * (n - 1)
    found = []
    for candidate in range(2, n + 1):
        if not is_prime[candidate]:
            continue
        found.append(candidate)
        # Cross off every multiple of the newly found prime.
        for multiple in range(candidate, n + 1, candidate):
            is_prime[multiple] = False
    return found


assert(generate_primes(18) == [2, 3, 5, 7, 11, 13, 17])


def generate_primes_square(n):
    """Odd-only sieve that starts eliminating at p**2; same asymptotics,
    better constants (even numbers are never stored)."""
    if n < 2:
        return []
    primes = [2]
    size = (n - 3) // 2 + 1
    is_prime = [True] * size  # is_prime[i] represents the odd number 2*i + 3
    for i in range(size):
        if not is_prime[i]:
            continue
        p = 2 * i + 3
        primes.append(p)
        # p*p corresponds to index 2i^2 + 6i + 3 in the odd-only encoding.
        for j in range(2 * i * i + 6 * i + 3, size, p):
            is_prime[j] = False
    return primes


assert(generate_primes_square(18) == [2, 3, 5, 7, 11, 13, 17])
""" Write a program that takes an integer argument and retums all the primes between 1 and that integer. For example, if the input is 18, you should return [2, 3, 5, 7, 11, 13, 17]. """ def generate_primes(n): primes = [] is_prime = [False, False] + [True] * (n - 1) for p in range(2, n + 1): if is_prime[p]: primes.append(p) for i in range(p, n + 1, p): is_prime[i] = False return primes assert generate_primes(18) == [2, 3, 5, 7, 11, 13, 17] def generate_primes_square(n): if n < 2: return [] primes = [2] size = (n - 3) // 2 + 1 is_prime = [True] * size for i in range(size): if is_prime[i]: p = i * 2 + 3 primes.append(p) for j in range(2 * i ** 2 + 6 * i + 3, size, p): is_prime[j] = False return primes assert generate_primes_square(18) == [2, 3, 5, 7, 11, 13, 17]
""" gdcdatamodel.migrations.update_case_cache -------------------- Functionality to fix stale case caches in _related_cases edge tables. """ def update_related_cases(driver, node_id): """Removes and re-adds the edge between the given node and it's parent to cascade the new relationship to _related_cases through the graph """ with driver.session_scope() as session: node = driver.nodes().ids(node_id).one() edges_out = node.edges_out for edge in edges_out: edge_cls = edge.__class__ copied_edge = edge_cls( src_id=edge.src_id, dst_id=edge.dst_id, properties=dict(edge.props), system_annotations=dict(edge.sysan), ) # Delete the edge driver.edges(edge_cls).filter( edge_cls.src_id == copied_edge.src_id, edge_cls.dst_id == copied_edge.dst_id, ).delete() session.expunge(edge) # Re-add the edge to force a refresh of the stale cache session.add(copied_edge) # Assert the edge was re-added or abort the session driver.edges(edge_cls).filter( edge_cls.src_id == copied_edge.src_id, edge_cls.dst_id == copied_edge.dst_id, ).one() def update_cache_cache_tree(driver, case): """Updates the _related_cases case cache for all children in the :param:`case` tree """ visited = set() with driver.session_scope(): for neighbor in case.edges_in: if neighbor.src_id in visited: continue update_related_cases(driver, neighbor.src_id) visited.add(neighbor.src_id)
""" gdcdatamodel.migrations.update_case_cache -------------------- Functionality to fix stale case caches in _related_cases edge tables. """ def update_related_cases(driver, node_id): """Removes and re-adds the edge between the given node and it's parent to cascade the new relationship to _related_cases through the graph """ with driver.session_scope() as session: node = driver.nodes().ids(node_id).one() edges_out = node.edges_out for edge in edges_out: edge_cls = edge.__class__ copied_edge = edge_cls(src_id=edge.src_id, dst_id=edge.dst_id, properties=dict(edge.props), system_annotations=dict(edge.sysan)) driver.edges(edge_cls).filter(edge_cls.src_id == copied_edge.src_id, edge_cls.dst_id == copied_edge.dst_id).delete() session.expunge(edge) session.add(copied_edge) driver.edges(edge_cls).filter(edge_cls.src_id == copied_edge.src_id, edge_cls.dst_id == copied_edge.dst_id).one() def update_cache_cache_tree(driver, case): """Updates the _related_cases case cache for all children in the :param:`case` tree """ visited = set() with driver.session_scope(): for neighbor in case.edges_in: if neighbor.src_id in visited: continue update_related_cases(driver, neighbor.src_id) visited.add(neighbor.src_id)
# For each of n numbers, count the digits that evenly divide the number itself.
n = int(input())
numbers = [int(input()) for _ in range(n)]
for original in numbers:
    divisors = 0
    remaining = original
    while remaining != 0:
        digit = remaining % 10
        # Zero digits are skipped (division by zero).
        if digit != 0 and original % digit == 0:
            divisors += 1
        remaining = int(remaining / 10)
    print(divisors)
# Count, for each input number, how many of its digits divide it evenly.
count_cases = int(input())
values = []
for _ in range(count_cases):
    values.append(int(input()))
for value in values:
    hits = 0
    n = value
    while n != 0:
        d = n % 10
        if d != 0 and value % d == 0:  # skip 0 digits to avoid division by zero
            hits += 1
        n = int(n / 10)
    print(hits)
class Qseq(str):
    """A ``str`` subclass that carries provenance metadata (parent object,
    originating key/id/class, name) and remembers the index or slice it was
    derived from.
    """

    def __init__(self, seq):
        # Provenance fields; the character data itself lives in the str base.
        self.qkey = None
        self.parent = None
        self.parental_id = None
        self.parental_class = None
        self.name = None
        self.item = None

    def __getitem__(self, item):
        """Index/slice like ``str`` but return a Qseq carrying this object's
        metadata; when the parent is a circular QUEEN sequence, a slice with
        start > stop wraps around the origin.
        """
        value = super().__getitem__(item)
        if self.parental_class == "QUEEN" and self.parent is not None and self.parent.topology == "circular":
            if type(item) == slice:
                # Normalise None/negative bounds to absolute positions.
                if item.start is None:
                    start = 0
                elif item.start < 0:
                    start = len(self) + item.start
                else:
                    start = item.start
                if item.stop is None:
                    stop = len(self)
                elif item.stop < 0:
                    stop = len(self) + item.stop
                else:
                    stop = item.stop
                # NOTE(review): `step` is only bound when item.step is None; a
                # wrap-around slice with an explicit step would hit an
                # UnboundLocalError below — confirm whether that is intended.
                if item.step is None:
                    step = 1
                if start > stop:
                    # start beyond stop on a circular sequence: wrap around
                    # the origin by concatenating tail + head.
                    value = str(self)[start:] + str(self)[:stop]
                    value = value[::step]
                else:
                    value = super().__getitem__(item)
            else:
                pass
            value = Qseq(value)
        else:
            value = Qseq(value)
        # Propagate provenance onto the derived sequence.
        value.qkey = self.qkey
        value.parent = self.parent
        value.parental_id = self.parental_id
        value.parental_class = self.parental_class
        value.name = self.name
        if value.item is None:
            # Qseq.__init__ has just reset value.item, so this branch records
            # the originating index/slice.
            value.item = item
        else:
            value.item = slice(value.item.start + item.start, value.item.start + item.stop)
        return value
class Qseq(str):
    """A ``str`` subclass carrying provenance metadata and the index/slice it
    was derived from.

    Fix: both wrap branches called ``qseq(value)`` — an undefined lowercase
    name — so every ``__getitem__`` raised NameError. The class is ``Qseq``.
    """

    def __init__(self, seq):
        # Provenance fields; character data lives in the str base class.
        self.qkey = None
        self.parent = None
        self.parental_id = None
        self.parental_class = None
        self.name = None
        self.item = None

    def __getitem__(self, item):
        """Index/slice like ``str`` but wrap the result in a Qseq carrying this
        object's metadata; circular QUEEN parents support wrap-around slices
        (start > stop).
        """
        value = super().__getitem__(item)
        if self.parental_class == 'QUEEN' and self.parent is not None and (self.parent.topology == 'circular'):
            if type(item) == slice:
                if item.start is None:
                    start = 0
                elif item.start < 0:
                    start = len(self) + item.start
                else:
                    start = item.start
                if item.stop is None:
                    stop = len(self)
                elif item.stop < 0:
                    stop = len(self) + item.stop
                else:
                    stop = item.stop
                if item.step is None:
                    step = 1
                if start > stop:
                    # Wrap around the origin: tail + head.
                    value = str(self)[start:] + str(self)[:stop]
                    value = value[::step]
                else:
                    value = super().__getitem__(item)
            else:
                pass
            value = Qseq(value)
        else:
            value = Qseq(value)
        # Propagate provenance onto the derived sequence.
        value.qkey = self.qkey
        value.parent = self.parent
        value.parental_id = self.parental_id
        value.parental_class = self.parental_class
        value.name = self.name
        if value.item is None:
            value.item = item
        else:
            value.item = slice(value.item.start + item.start, value.item.start + item.stop)
        return value
# Map each river to the country it flows through, then print three listings.
rios = {
    'nilo': 'egito',
    'rio grande': 'brasil',
    'rio parana': 'brasil',
}

for nome, pais in rios.items():
    print(' O rio ' + nome.title() + ' corre pelo ' + pais.title())

for nome in rios.keys():
    print(nome.title())

for pais in rios.values():
    print(pais.title())
# River -> country mapping, printed as sentences, then names, then countries.
rios = {'nilo': 'egito', 'rio grande': 'brasil', 'rio parana': 'brasil'}

for key in rios:
    value = rios[key]
    print(f' O rio {key.title()} corre pelo {value.title()}')

for key in rios.keys():
    print(key.title())

for value in rios.values():
    print(value.title())
def hello(name):
    """Generate a friendly greeting.

    Greeting is an important part of culture. This function takes a name
    and generates a friendly greeting for it.

    Parameters
    ----------
    name : str
        A name.

    Returns
    -------
    str
        A greeting.
    """
    return f'Hello {name}'
def hello(name):
    """Build a friendly greeting for *name*.

    Parameters
    ----------
    name : str
        The name to greet.

    Returns
    -------
    str
        The greeting text.
    """
    return 'Hello {}'.format(name)
# Report which vowels occur in a word typed by the user.
vowels2 = set('aeiiiouuu')
word = input('Provide a word to search for vowels:').lower()
found = vowels2 & set(word)
for v in found:
    print(v)
# Print each distinct vowel found in the user-supplied word.
vowels2 = set('aeiiiouuu')
word = input('Provide a word to search for vowels:').lower()
found = {ch for ch in word if ch in vowels2}
for v in found:
    print(v)
#!/usr/bin/env python2.7
"""
Read the file agents.raw and output agents.txt, a readable file
"""

if __name__ == "__main__":
    with open("agents.raw", "r") as source, open("agents.txt", "w") as sink:
        for row in source:
            # Keep only table-drawing lines (those starting with '+' or '|').
            if row.startswith(('+', '|')):
                sink.write(row)
""" Read the file agents.raw and output agents.txt, a readable file """ if __name__ == '__main__': with open('agents.raw', 'r') as f: with open('agents.txt', 'w') as out: for line in f: if line[0] in '+|': out.write(line)
# -*- coding: utf-8 -*-
"""mul.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1fvc_bX8oobfbr_O2r1pCpgOd2CTckXe9
"""


def mul(num1, num2):
    """Print the product of *num1* and *num2*."""
    product = num1 * num2
    print('Result is: ', product)
"""mul.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1fvc_bX8oobfbr_O2r1pCpgOd2CTckXe9 """ def mul(num1, num2): print('Result is: ', num1 * num2)
class Solution:
    def wordPattern(self, pattern: str, str: str) -> bool:
        """Return True iff *pattern*'s letters and *str*'s words form a bijection."""
        words = str.split()
        if len(pattern) != len(words):
            return False
        word_to_char = {}
        used_chars = set()
        for ch, word in zip(pattern, words):
            if word in word_to_char:
                if word_to_char[word] != ch:
                    return False
            elif ch in used_chars:
                # Letter already bound to a different word.
                return False
            else:
                word_to_char[word] = ch
                used_chars.add(ch)
        return True
class Solution:
    def word_pattern(self, pattern: str, str: str) -> bool:
        """Check that pattern letters map one-to-one onto the words of *str*."""
        tokens = str.split()
        if len(tokens) != len(pattern):
            return False
        forward = {}   # word -> pattern letter
        taken = set()  # letters already assigned to some word
        for i in range(len(tokens)):
            token = tokens[i]
            letter = pattern[i]
            if token in forward:
                if forward[token] != letter:
                    return False
                continue
            if letter in taken:
                return False
            forward[token] = letter
            taken.add(letter)
        return True
# STOMP connection settings; credentials depend on the broker in use.
HOST = 'localhost'
PORT = 61613
VERSION = '1.2'

BROKER = 'activemq'

# Per-broker (login, passcode, virtualhost) triples.
_CREDENTIALS = {
    'activemq': ('', '', ''),
    'apollo': ('admin', 'password', 'mybroker'),
    'rabbitmq': ('guest', 'guest', '/'),
}
LOGIN, PASSCODE, VIRTUALHOST = _CREDENTIALS[BROKER]
# STOMP connection settings; credentials are selected by broker name.
# Fix: the dict was indexed with BROKER (uppercase) while only lowercase
# `broker` was defined -> NameError at import time.
host = 'localhost'
port = 61613
version = '1.2'
broker = 'activemq'
(login, passcode, virtualhost) = {
    'activemq': ('', '', ''),
    'apollo': ('admin', 'password', 'mybroker'),
    'rabbitmq': ('guest', 'guest', '/'),
}[broker]
# python3.7
"""Configuration for testing StyleGAN on FF-HQ (1024) dataset.

All settings are particularly used for one replica (GPU), such as
`batch_size` and `num_workers`.
"""

runner_type = 'StyleGANRunner'
gan_type = 'stylegan'
resolution = 1024
batch_size = 16

data = {
    'num_workers': 4,
    # val=dict(root_dir='data/ffhq', resolution=resolution),
    'val': {'root_dir': 'data/ffhq.zip', 'data_format': 'zip', 'resolution': resolution},
}

modules = {
    'discriminator': {
        'model': {'gan_type': gan_type, 'resolution': resolution},
        'kwargs_val': {},
    },
    'generator': {
        'model': {'gan_type': gan_type, 'resolution': resolution},
        'kwargs_val': {'trunc_psi': 0.7, 'trunc_layers': 8, 'randomize_noise': False},
    },
}
"""Configuration for testing StyleGAN on FF-HQ (1024) dataset. All settings are particularly used for one replica (GPU), such as `batch_size` and `num_workers`. """ runner_type = 'StyleGANRunner' gan_type = 'stylegan' resolution = 1024 batch_size = 16 data = dict(num_workers=4, val=dict(root_dir='data/ffhq.zip', data_format='zip', resolution=resolution)) modules = dict(discriminator=dict(model=dict(gan_type=gan_type, resolution=resolution), kwargs_val=dict()), generator=dict(model=dict(gan_type=gan_type, resolution=resolution), kwargs_val=dict(trunc_psi=0.7, trunc_layers=8, randomize_noise=False)))
def Num2581():
    """Read bounds M and N; print the sum then the minimum of the primes in
    [M, N], or -1 when there are none (BOJ 2581)."""
    lower = int(input())
    upper = int(input())
    primes = []
    for candidate in range(lower, upper + 1):
        if candidate == 1:
            continue
        for divisor in range(2, candidate):
            if candidate % divisor == 0:
                break
        else:
            # No divisor found: candidate is prime.
            primes.append(candidate)
    if not primes:
        print("-1")
    else:
        print(str(sum(primes)))
        print(str(primes[0]))


Num2581()
def num2581():
    """Print the sum and the minimum of the primes in [m, n], or -1 if none.

    Fix: the body referenced M, N, primeNums, sumPrimeNum and minPrimeNum,
    none of which existed after the snake_case rename -> NameError.
    """
    m = int(input())
    n = int(input())
    min_prime_num = 0
    sum_prime_num = 0
    prime_nums = []
    for dividend in range(m, n + 1):
        if dividend == 1:
            continue
        # Optimistically add, then remove if any divisor is found.
        prime_nums.append(dividend)
        sum_prime_num += dividend
        for divisor in range(2, dividend):
            if dividend % divisor == 0:
                prime_nums.pop()
                sum_prime_num -= dividend
                break
    if len(prime_nums) == 0:
        print('-1')
    else:
        min_prime_num = prime_nums[0]
        sum_prime_num = sum(prime_nums)
        print(str(sum_prime_num))
        print(str(min_prime_num))


num2581()
#takes a list of options as input which will be printed as a numerical list
#Also optionally takes messages as input to be printed after the options but before input request
#Ultimately the users choice is returned
def selector(options, *messages):
    """Show *options* as a numbered menu and return the chosen option,
    or False when the user gives up / enters nothing.

    Fixes: (1) `value` was unbound when the typed number was outside
    1..len(options), raising NameError instead of the error message;
    (2) after a non-numeric entry and a 'y' retry, execution fell through
    to int(choice) and re-raised ValueError uncaught — now it restarts
    the menu loop.
    """
    rego = 'y'
    while rego.lower() == "y":
        n = 1
        for x in options:
            print(str(n) + ". " + str(x))
            n = n + 1
        n = 1
        print("")
        for arg in messages:
            print(arg)
        choice = input(" :> ")
        if choice == "":
            return False
        try:
            i_choice = int(choice)
        except ValueError:
            print("\nError: Sorry, that's not a valid entry, please enter a number from the list.")
            rego = input("\nWould you like to try again? Type Y for yes or N for no :> ")
            if rego.lower() == "y":
                continue  # fix: restart the menu instead of re-parsing bad input
            else:
                break
        print("")
        p = 1
        value = None  # fix: guarantee `value` is bound for out-of-range numbers
        for x in options:
            if i_choice == p:
                value = x
            p = p + 1
        p = 1
        if value in options:
            return value
        else:
            if not value:
                print("\nError: Sorry, that's not a valid entry, please enter a number from the list")
                rego = input("Would you like to try again? Type Y for yes or N for no :> ")
            return False
def selector(options, *messages):
    """Print *options* as a numbered list (then any *messages*) and return
    the option the user picks, or False on blank/invalid input.

    Fixes: (1) `value` could be unbound when the typed number was outside
    1..len(options) -> NameError; (2) after a non-numeric entry followed by
    a 'y' retry, control fell through to int(choice) and re-raised the
    ValueError uncaught — now the menu loop restarts.
    """
    rego = 'y'
    while rego.lower() == 'y':
        n = 1
        for x in options:
            print(str(n) + '. ' + str(x))
            n = n + 1
        n = 1
        print('')
        for arg in messages:
            print(arg)
        choice = input(' :> ')
        if choice == '':
            return False
        try:
            i_choice = int(choice)
        except ValueError:
            print("\nError: Sorry, that's not a valid entry, please enter a number from the list.")
            rego = input('\nWould you like to try again? Type Y for yes or N for no :> ')
            if rego.lower() == 'y':
                continue  # fix: restart the menu instead of re-parsing bad input
            else:
                break
        print('')
        p = 1
        value = None  # fix: ensure `value` exists even for out-of-range numbers
        for x in options:
            if i_choice == p:
                value = x
            p = p + 1
        p = 1
        if value in options:
            return value
        elif not value:
            print("\nError: Sorry, that's not a valid entry, please enter a number from the list")
            rego = input('Would you like to try again? Type Y for yes or N for no :> ')
        return False
"""A module to keep track of a relinearization key.""" class BFVRelinKey: """An instance of a relinearization key. The relinearization key consists of a list of values, as specified in Version 1 of the BFV paper, generated in key_generator.py. Attributes: base (int): Base used in relinearization in Version 1. keys (list of tuples of Polynomials): List of elements in the relinearization key. Each element of the list is a pair of polynomials. """ def __init__(self, base, keys): """Sets relinearization key to given inputs. Args: base (int): Base used for relinearization. keys (list of tuples of Polynomials): List of elements in the relinearization key. """ self.base = base self.keys = keys def __str__(self): """Represents RelinKey as a string. Returns: A string which represents the RelinKey. """ return 'Base: ' + str(self.base) + '\n' + str(self.keys)
"""A module to keep track of a relinearization key.""" class Bfvrelinkey: """An instance of a relinearization key. The relinearization key consists of a list of values, as specified in Version 1 of the BFV paper, generated in key_generator.py. Attributes: base (int): Base used in relinearization in Version 1. keys (list of tuples of Polynomials): List of elements in the relinearization key. Each element of the list is a pair of polynomials. """ def __init__(self, base, keys): """Sets relinearization key to given inputs. Args: base (int): Base used for relinearization. keys (list of tuples of Polynomials): List of elements in the relinearization key. """ self.base = base self.keys = keys def __str__(self): """Represents RelinKey as a string. Returns: A string which represents the RelinKey. """ return 'Base: ' + str(self.base) + '\n' + str(self.keys)
class Wrapping(object):
    """Augments a class so its instances delegate to a wrapped object,
    injecting a stored dependency as the first argument of selected methods."""

    def __init__(self, klass, wrapped_klass, dependency_functions):
        self._klass = klass
        self._wrapped_klass = wrapped_klass
        self._dependency_functions = dependency_functions

    def wrap(self):
        """Install forwarding methods plus __getattr__/__init__; return the class."""
        for method_name in self._dependency_functions:
            self._access_class_dependency(method_name)
        self._klass.__getattr__ = self._getattr()
        self._klass.__init__ = self._init()
        return self._klass

    def _access_class_dependency(self, method_name):
        # Forward the call to the wrapped object, prepending the dependency.
        def _call_dependency_method(wrapper_instance, *args, **kwargs):
            target = getattr(wrapper_instance._wrapped, method_name)
            return target(wrapper_instance._dependency, *args, **kwargs)

        setattr(self._klass, method_name, _call_dependency_method)

    def _getattr(self):
        # Fall back to the wrapped object for attributes the wrapper lacks.
        def _inner(wrapper_instance, name):
            wrapped = wrapper_instance._wrapped
            if not hasattr(wrapped, name):
                raise AttributeError(self._missing_attribute_message(name))
            return getattr(wrapped, name)

        return _inner

    def _missing_attribute_message(self, name):
        return "'{}' object has no attribute '{}'".format(self._klass.__name__, name)

    def _init(self):
        # Replacement __init__: store the dependency, build the wrapped instance.
        def _inner(wrapper_instance, dependency):
            wrapper_instance._dependency = dependency
            wrapper_instance._wrapped = self._wrapped_klass()

        return _inner


def wrap_class_with_dependency(wrapped_klass, *dependency_functions):
    """Class-decorator factory: delegate *dependency_functions* to *wrapped_klass*."""
    def _wrap(klass):
        return Wrapping(klass, wrapped_klass, dependency_functions).wrap()

    return _wrap
class Wrapping(object):
    """Wraps a class so its instances delegate to an internally-built wrapped
    object, injecting a stored dependency into selected methods.

    Fixes: ``_getattr`` raised ``attribute_error(...)`` and
    ``wrap_class_with_dependency`` instantiated ``wrapping(...)`` — both
    undefined lowercase names (the builtin is AttributeError; the class is
    Wrapping) -> NameError at runtime.
    """

    def __init__(self, klass, wrapped_klass, dependency_functions):
        self._klass = klass
        self._wrapped_klass = wrapped_klass
        self._dependency_functions = dependency_functions

    def wrap(self):
        """Install forwarding methods plus __getattr__/__init__; return the class."""
        for dependency_method in self._dependency_functions:
            self._access_class_dependency(dependency_method)
        setattr(self._klass, '__getattr__', self._getattr())
        setattr(self._klass, '__init__', self._init())
        return self._klass

    def _access_class_dependency(self, method_name):
        # Forward the call to the wrapped object, prepending the dependency.
        def _call_dependency_method(wrapper_instance, *args, **kwargs):
            method = getattr(wrapper_instance._wrapped, method_name)
            return method(wrapper_instance._dependency, *args, **kwargs)

        setattr(self._klass, method_name, _call_dependency_method)

    def _getattr(self):
        # Fall back to the wrapped object for attributes the wrapper lacks.
        def _inner(wrapper_instance, name):
            if hasattr(wrapper_instance._wrapped, name):
                return getattr(wrapper_instance._wrapped, name)
            raise AttributeError(self._missing_attribute_message(name))

        return _inner

    def _missing_attribute_message(self, name):
        return "'{}' object has no attribute '{}'".format(self._klass.__name__, name)

    def _init(self):
        # Replacement __init__: store the dependency, build the wrapped instance.
        def _inner(wrapper_instance, dependency):
            wrapper_instance._dependency = dependency
            wrapper_instance._wrapped = self._wrapped_klass()

        return _inner


def wrap_class_with_dependency(wrapped_klass, *dependency_functions):
    """Class-decorator factory: delegate *dependency_functions* to *wrapped_klass*."""
    def _wrap(klass):
        return Wrapping(klass, wrapped_klass, dependency_functions).wrap()

    return _wrap
""" templates for urls """ URL = """router.register(r'%(path)s', %(model)sViewSet) """ VIEWSET_IMPORT = """from %(app)s.views import %(model)sViewSet """ URL_PATTERNS = """urlpatterns = [ path("", include(router.urls)), ]""" SETUP = """from rest_framework import routers from django.urls import include, path router = routers.DefaultRouter() """
""" templates for urls """ url = "router.register(r'%(path)s', %(model)sViewSet)\n\n" viewset_import = 'from %(app)s.views import %(model)sViewSet\n' url_patterns = 'urlpatterns = [\n path("", include(router.urls)),\n]' setup = 'from rest_framework import routers\nfrom django.urls import include, path\n\nrouter = routers.DefaultRouter()\n\n'
def roman_number(num):
    """Convert *num* (<= 3999) to a Roman numeral string.

    Prints a warning and returns None for numbers above 3999.
    """
    if num > 3999:
        print("enter number less than 3999")
        return
    # Value/symbol pairs in descending order, including subtractive forms.
    values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
    symbols = ["M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"]
    result = ""
    for value, symbol in zip(values, symbols):
        if num <= 0:
            break
        count, num = divmod(num, value)
        result += symbol * count
    return result


num = int(input("enter an integer number: "))
print(f" Roman Numeral of {num} is {roman_number(num)}")
def roman_number(num):
    """Return the Roman numeral for *num* (<= 3999); print a warning and
    return None for larger values."""
    if num > 3999:
        print('enter number less than 3999')
        return
    # Descending value/symbol pairs, subtractive forms included.
    pairs = ((1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'),
             (90, 'XC'), (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'),
             (5, 'V'), (4, 'IV'), (1, 'I'))
    roman = ''
    i = 0
    while num > 0:
        value, symbol = pairs[i]
        while num >= value:
            roman += symbol
            num -= value
        i += 1
    return roman


num = int(input('enter an integer number: '))
print(f' Roman Numeral of {num} is {roman_number(num)}')
# Named single-character (and escape) constants.
AN_ESCAPED_NEWLINE = "\n"  # a literal newline character
AN_ESCAPED_TAB = "\t"      # a literal tab character
A_COMMA = ","
A_QUOTE = "'"              # single-quote character
A_TAB = "\t"
DOUBLE_QUOTES = "\""       # double-quote character
NEWLINE = "\n"
# Named single-character (and escape) constants.
an_escaped_newline = '\n'  # a literal newline character
an_escaped_tab = '\t'      # a literal tab character
a_comma = ','
a_quote = "'"              # single-quote character
a_tab = '\t'
double_quotes = '"'        # double-quote character
newline = '\n'
#!/usr/bin/env python3
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License..
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class ScholarshipBuilder:
    """Static helpers that populate fields of a scholarship dict from raw
    (spreadsheet-style) values: ids become strings, comma/semicolon lists
    become Python lists, and yes/no flags become booleans.
    """

    @staticmethod
    def setName(scholarship, name):
        scholarship["scholarshipName"] = name

    @staticmethod
    def setId(scholarship, id):
        # int() first normalises numeric formatting; stored as a string.
        scholarship["id"] = str(int(id))

    @staticmethod
    def setSchoolsList(scholarship, schoolsList):
        # Comma-separated list of school names.
        scholarship["schoolsList"] = schoolsList.split(",")

    @staticmethod
    def setURL(scholarship, url):
        scholarship["URL"] = url

    @staticmethod
    def setYear(scholarship, year):
        scholarship["year"] = year

    @staticmethod
    def setIntroduction(scholarship, intro):
        scholarship["introduction"] = intro

    @staticmethod
    def setIsRenewable(scholarship, isRenewable):
        # Only the exact strings 'yes'/'Yes' count as renewable.
        scholarship["isRenewable"] = isRenewable == "yes" or isRenewable == "Yes"

    @staticmethod
    def setNumberOfYears(scholarship, years):
        scholarship["numberOfYears"] = years

    @staticmethod
    def setAmountPerYear(scholarship, amount):
        scholarship["amountPerYear"] = amount

    @staticmethod
    def setLocationRequirements(scholarship, locationRequirements):
        # Requirement columns are semicolon-separated lists.
        scholarship["locationRequirements"] = locationRequirements.split(';')

    @staticmethod
    def setAcademicRequirements(scholarship, academicRequirements):
        scholarship["academicRequirements"] = academicRequirements.split(';')

    @staticmethod
    def setEthnicityRaceRequirements(scholarship, ethnicityRaceRequirements):
        scholarship["ethnicityRaceRequirements"] = ethnicityRaceRequirements.split(';')

    @staticmethod
    def setGenderRequirements(scholarship, genders):
        # NOTE(review): key 'genderRequriements' looks misspelled — confirm
        # downstream consumers expect this exact spelling before changing it.
        scholarship["genderRequriements"] = genders.split(';')

    @staticmethod
    def setNationalOriginRequirements(scholarship, nations):
        scholarship["nationalOriginRequirements"] = nations.split(';')

    @staticmethod
    def setOtherRequirements(scholarship, otherRequirements):
        scholarship["otherRequirements"] = otherRequirements.split(';')

    @staticmethod
    def setFinancialRequirements(scholarship, financialRequirements):
        scholarship["financialRequirements"] = financialRequirements.split(';')

    @staticmethod
    def setApplicationProcess(scholarship, applicationProcess):
        scholarship["applicationProcess"] = applicationProcess

    @staticmethod
    def setSchoolsNameToIDMap(scholarship):
        # Seed a name -> id map with empty ids; ids are filled in later.
        nameToIDMap = {}
        for school in scholarship["schoolsList"]:
            nameToIDMap[school] = ""
        scholarship["schoolIdList"] = nameToIDMap
class Scholarshipbuilder:
    """Static helpers that populate the fields of a scholarship dict in place."""

    @staticmethod
    def set_name(scholarship, name):
        scholarship['scholarshipName'] = name

    @staticmethod
    def set_id(scholarship, id):
        # Normalize ids like "007" to "7" before storing as a string.
        scholarship['id'] = str(int(id))

    @staticmethod
    def set_schools_list(scholarship, schoolsList):
        scholarship['schoolsList'] = schoolsList.split(',')

    @staticmethod
    def set_url(scholarship, url):
        scholarship['URL'] = url

    @staticmethod
    def set_year(scholarship, year):
        scholarship['year'] = year

    @staticmethod
    def set_introduction(scholarship, intro):
        scholarship['introduction'] = intro

    @staticmethod
    def set_is_renewable(scholarship, isRenewable):
        # Only the literal strings "yes"/"Yes" count as renewable.
        scholarship['isRenewable'] = isRenewable == 'yes' or isRenewable == 'Yes'

    @staticmethod
    def set_number_of_years(scholarship, years):
        scholarship['numberOfYears'] = years

    @staticmethod
    def set_amount_per_year(scholarship, amount):
        scholarship['amountPerYear'] = amount

    @staticmethod
    def set_location_requirements(scholarship, locationRequirements):
        scholarship['locationRequirements'] = locationRequirements.split(';')

    @staticmethod
    def set_academic_requirements(scholarship, academicRequirements):
        scholarship['academicRequirements'] = academicRequirements.split(';')

    @staticmethod
    def set_ethnicity_race_requirements(scholarship, ethnicityRaceRequirements):
        scholarship['ethnicityRaceRequirements'] = ethnicityRaceRequirements.split(';')

    @staticmethod
    def set_gender_requirements(scholarship, genders):
        # NOTE(review): 'genderRequriements' is misspelled but kept as-is —
        # downstream readers key on this exact string; confirm before renaming.
        scholarship['genderRequriements'] = genders.split(';')

    @staticmethod
    def set_national_origin_requirements(scholarship, nations):
        scholarship['nationalOriginRequirements'] = nations.split(';')

    @staticmethod
    def set_other_requirements(scholarship, otherRequirements):
        scholarship['otherRequirements'] = otherRequirements.split(';')

    @staticmethod
    def set_financial_requirements(scholarship, financialRequirements):
        scholarship['financialRequirements'] = financialRequirements.split(';')

    @staticmethod
    def set_application_process(scholarship, applicationProcess):
        scholarship['applicationProcess'] = applicationProcess

    @staticmethod
    def set_schools_name_to_id_map(scholarship):
        # Map every school in schoolsList to an (initially empty) id placeholder.
        # Bug fix: the original assigned into the old camelCase name
        # `nameToIDMap`, which no longer exists -> NameError on first call.
        name_to_id_map = {}
        for school in scholarship['schoolsList']:
            name_to_id_map[school] = ''
        scholarship['schoolIdList'] = name_to_id_map
class MyClass: def __init__(self, name): self.__name = name def Show(self): print(self.__name) c1 = MyClass('c1') c2 = MyClass('c2') c3 = MyClass('c3') c4 = MyClass('c4') a = {x.Show() for x in {c1, c2, c3, c4} if x not in {c1, c2}}
class Myclass:
    """Simple named object; show() prints the (name-mangled private) name."""

    def __init__(self, name):
        self.__name = name

    def show(self):
        print(self.__name)

# Bug fix: instances must be created via the class `Myclass` (the rename left
# calls to a non-existent `my_class`), and the comprehension must call the
# renamed method `show`, not the old `Show`.
c1 = Myclass('c1')
c2 = Myclass('c2')
c3 = Myclass('c3')
c4 = Myclass('c4')
# show() returns None, so after printing c3/c4 the set collapses to {None}.
a = {x.show() for x in {c1, c2, c3, c4} if x not in {c1, c2}}
@metadata_reactor def install_git(metadata): return { 'apt': { 'packages': { 'git': { 'installed': True, }, } }, }
@metadata_reactor def install_git(metadata): return {'apt': {'packages': {'git': {'installed': True}}}}
if __name__ == '__main__': n,m = map(int,input().split()) l = map(int,input().split()) a = set(map(int,input().split())) b = set(map(int,input().split())) happy = 0 for i in l: if i in a: happy += 1 elif i in b: happy -= 1 print(happy) # print(sum([(i in a) - (i in b) for i in l]))
if __name__ == '__main__': (n, m) = map(int, input().split()) l = map(int, input().split()) a = set(map(int, input().split())) b = set(map(int, input().split())) happy = 0 for i in l: if i in a: happy += 1 elif i in b: happy -= 1 print(happy)
#!/usr/bin/env python """Python-LCS Algorithm, Author: Matt Nichols Copied from https://github.com/man1/Python-LCS/blob/master/lcs.py Modified for running LCS inside P4SC merging algorithm """ ### solve the longest common subsequence problem # get the matrix of LCS lengths at each sub-step of the recursive process # (m+1 by n+1, where m=len(list1) & n=len(list2) ... it's one larger in each direction # so we don't have to special-case the x-1 cases at the first elements of the iteration def lcs_mat(list1, list2): m = len(list1) n = len(list2) # construct the matrix, of all zeroes mat = [[0] * (n+1) for row in range(m+1)] # populate the matrix, iteratively for row in range(1, m+1): for col in range(1, n+1): if list1[row - 1] == list2[col - 1]: # if it's the same element, it's one longer than the LCS of the truncated lists mat[row][col] = mat[row - 1][col - 1] + 1 else: # they're not the same, so it's the the maximum of the lengths of the LCSs of the two options (different list truncated in each case) mat[row][col] = max(mat[row][col - 1], mat[row - 1][col]) # the matrix is complete return mat # backtracks all the LCSs through a provided matrix def all_lcs(lcs_dict, mat, list1, list2, index1, index2): # if we've calculated it already, just return that if (lcs_dict.has_key((index1, index2))): return lcs_dict[(index1, index2)] # otherwise, calculate it recursively if (index1 == 0) or (index2 == 0): # base case return [[]] elif list1[index1 - 1] == list2[index2 - 1]: # elements are equal! 
Add it to all LCSs that pass through these indices lcs_dict[(index1, index2)] = [prevs + [list1[index1 - 1]] for prevs in all_lcs(lcs_dict, mat, list1, list2, index1 - 1, index2 - 1)] return lcs_dict[(index1, index2)] else: lcs_list = [] # set of sets of LCSs from here # not the same, so follow longer path recursively if mat[index1][index2 - 1] >= mat[index1 - 1][index2]: before = all_lcs(lcs_dict, mat, list1, list2, index1, index2 - 1) for series in before: # iterate through all those before if not series in lcs_list: lcs_list.append(series) # and if it's not already been found, append to lcs_list if mat[index1 - 1][index2] >= mat[index1][index2 - 1]: before = all_lcs(lcs_dict, mat, list1, list2, index1 - 1, index2) for series in before: if not series in lcs_list: lcs_list.append(series) lcs_dict[(index1, index2)] = lcs_list return lcs_list # return a set of the sets of longest common subsequences in list1 and list2 def lcs(list1, list2): # mapping of indices to list of LCSs, so we can cut down recursive calls enormously mapping = dict() # start the process... return all_lcs(mapping, lcs_mat(list1, list2), list1, list2, len(list1), len(list2));
"""Python-LCS Algorithm, Author: Matt Nichols Copied from https://github.com/man1/Python-LCS/blob/master/lcs.py Modified for running LCS inside P4SC merging algorithm """ def lcs_mat(list1, list2): m = len(list1) n = len(list2) mat = [[0] * (n + 1) for row in range(m + 1)] for row in range(1, m + 1): for col in range(1, n + 1): if list1[row - 1] == list2[col - 1]: mat[row][col] = mat[row - 1][col - 1] + 1 else: mat[row][col] = max(mat[row][col - 1], mat[row - 1][col]) return mat def all_lcs(lcs_dict, mat, list1, list2, index1, index2): if lcs_dict.has_key((index1, index2)): return lcs_dict[index1, index2] if index1 == 0 or index2 == 0: return [[]] elif list1[index1 - 1] == list2[index2 - 1]: lcs_dict[index1, index2] = [prevs + [list1[index1 - 1]] for prevs in all_lcs(lcs_dict, mat, list1, list2, index1 - 1, index2 - 1)] return lcs_dict[index1, index2] else: lcs_list = [] if mat[index1][index2 - 1] >= mat[index1 - 1][index2]: before = all_lcs(lcs_dict, mat, list1, list2, index1, index2 - 1) for series in before: if not series in lcs_list: lcs_list.append(series) if mat[index1 - 1][index2] >= mat[index1][index2 - 1]: before = all_lcs(lcs_dict, mat, list1, list2, index1 - 1, index2) for series in before: if not series in lcs_list: lcs_list.append(series) lcs_dict[index1, index2] = lcs_list return lcs_list def lcs(list1, list2): mapping = dict() return all_lcs(mapping, lcs_mat(list1, list2), list1, list2, len(list1), len(list2))
# // This is a generated file, modify: generate/templates/binding.gyp. { "targets": [{ "target_name": "nodegit", "dependencies": [ "<(module_root_dir)/vendor/libgit2.gyp:libgit2" ], "sources": [ "src/nodegit.cc", "src/wrapper.cc", "src/functions/copy.cc", "src/attr.cc", "src/blame.cc", "src/blame_hunk.cc", "src/blame_options.cc", "src/blob.cc", "src/branch.cc", "src/branch_iterator.cc", "src/buf.cc", "src/checkout.cc", "src/checkout_options.cc", "src/cherry.cc", "src/cherry_pick_options.cc", "src/clone.cc", "src/clone_options.cc", "src/commit.cc", "src/config.cc", "src/cred.cc", "src/cred_default.cc", "src/cred_userpass_payload.cc", "src/diff.cc", "src/diff_delta.cc", "src/diff_file.cc", "src/diff_hunk.cc", "src/diff_line.cc", "src/diff_options.cc", "src/diff_perfdata.cc", "src/diff_stats.cc", "src/error.cc", "src/filter.cc", "src/filter_list.cc", "src/giterr.cc", "src/graph.cc", "src/ignore.cc", "src/index.cc", "src/index_conflict_iterator.cc", "src/index_entry.cc", "src/index_time.cc", "src/indexer.cc", "src/libgit2.cc", "src/mempack.cc", "src/merge.cc", "src/merge_file_input.cc", "src/merge_head.cc", "src/merge_options.cc", "src/merge_result.cc", "src/message.cc", "src/note.cc", "src/note_iterator.cc", "src/object.cc", "src/odb.cc", "src/odb_object.cc", "src/oid.cc", "src/oid_shorten.cc", "src/packbuilder.cc", "src/patch.cc", "src/pathspec.cc", "src/pathspec_match_list.cc", "src/push.cc", "src/push_options.cc", "src/refdb.cc", "src/reference.cc", "src/reflog.cc", "src/reflog_entry.cc", "src/refspec.cc", "src/remote.cc", "src/remote_callbacks.cc", "src/repository.cc", "src/repository_init_options.cc", "src/revert.cc", "src/revert_options.cc", "src/revparse.cc", "src/revwalk.cc", "src/signature.cc", "src/smart.cc", "src/stash.cc", "src/status.cc", "src/status_list.cc", "src/strarray.cc", "src/submodule.cc", "src/tag.cc", "src/threads.cc", "src/time.cc", "src/trace.cc", "src/transfer_progress.cc", "src/transport.cc", "src/tree.cc", "src/tree_entry.cc", 
"src/treebuilder.cc", ], "include_dirs": [ "vendor/libv8-convert", "<!(node -e \"require('nan')\")" ], "cflags": [ "-Wall", "-std=c++11" ], "conditions": [ [ "OS=='mac'", { "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES", "MACOSX_DEPLOYMENT_TARGET": "10.7", "WARNING_CFLAGS": [ "-Wno-unused-variable", "-Wint-conversions", "-Wmissing-field-initializers" ], "OTHER_CPLUSPLUSFLAGS": [ "-std=gnu++11", "-stdlib=libc++" ], "OTHER_LDFLAGS": [ "-stdlib=libc++" ] } } ] ] }] }
{'targets': [{'target_name': 'nodegit', 'dependencies': ['<(module_root_dir)/vendor/libgit2.gyp:libgit2'], 'sources': ['src/nodegit.cc', 'src/wrapper.cc', 'src/functions/copy.cc', 'src/attr.cc', 'src/blame.cc', 'src/blame_hunk.cc', 'src/blame_options.cc', 'src/blob.cc', 'src/branch.cc', 'src/branch_iterator.cc', 'src/buf.cc', 'src/checkout.cc', 'src/checkout_options.cc', 'src/cherry.cc', 'src/cherry_pick_options.cc', 'src/clone.cc', 'src/clone_options.cc', 'src/commit.cc', 'src/config.cc', 'src/cred.cc', 'src/cred_default.cc', 'src/cred_userpass_payload.cc', 'src/diff.cc', 'src/diff_delta.cc', 'src/diff_file.cc', 'src/diff_hunk.cc', 'src/diff_line.cc', 'src/diff_options.cc', 'src/diff_perfdata.cc', 'src/diff_stats.cc', 'src/error.cc', 'src/filter.cc', 'src/filter_list.cc', 'src/giterr.cc', 'src/graph.cc', 'src/ignore.cc', 'src/index.cc', 'src/index_conflict_iterator.cc', 'src/index_entry.cc', 'src/index_time.cc', 'src/indexer.cc', 'src/libgit2.cc', 'src/mempack.cc', 'src/merge.cc', 'src/merge_file_input.cc', 'src/merge_head.cc', 'src/merge_options.cc', 'src/merge_result.cc', 'src/message.cc', 'src/note.cc', 'src/note_iterator.cc', 'src/object.cc', 'src/odb.cc', 'src/odb_object.cc', 'src/oid.cc', 'src/oid_shorten.cc', 'src/packbuilder.cc', 'src/patch.cc', 'src/pathspec.cc', 'src/pathspec_match_list.cc', 'src/push.cc', 'src/push_options.cc', 'src/refdb.cc', 'src/reference.cc', 'src/reflog.cc', 'src/reflog_entry.cc', 'src/refspec.cc', 'src/remote.cc', 'src/remote_callbacks.cc', 'src/repository.cc', 'src/repository_init_options.cc', 'src/revert.cc', 'src/revert_options.cc', 'src/revparse.cc', 'src/revwalk.cc', 'src/signature.cc', 'src/smart.cc', 'src/stash.cc', 'src/status.cc', 'src/status_list.cc', 'src/strarray.cc', 'src/submodule.cc', 'src/tag.cc', 'src/threads.cc', 'src/time.cc', 'src/trace.cc', 'src/transfer_progress.cc', 'src/transport.cc', 'src/tree.cc', 'src/tree_entry.cc', 'src/treebuilder.cc'], 'include_dirs': ['vendor/libv8-convert', '<!(node -e 
"require(\'nan\')")'], 'cflags': ['-Wall', '-std=c++11'], 'conditions': [["OS=='mac'", {'xcode_settings': {'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'MACOSX_DEPLOYMENT_TARGET': '10.7', 'WARNING_CFLAGS': ['-Wno-unused-variable', '-Wint-conversions', '-Wmissing-field-initializers'], 'OTHER_CPLUSPLUSFLAGS': ['-std=gnu++11', '-stdlib=libc++'], 'OTHER_LDFLAGS': ['-stdlib=libc++']}}]]}]}
# Text for QML styles chl_style = """ <!DOCTYPE qgis PUBLIC 'http://mrcc.com/qgis.dtd' 'SYSTEM'> <qgis version="2.4.0-Chugiak" minimumScale="0" maximumScale="1e+08" hasScaleBasedVisibilityFlag="0"> <pipe> <rasterrenderer opacity="1" alphaBand="0" classificationMax="1.44451" classificationMinMaxOrigin="CumulativeCutFullExtentEstimated" band="1" classificationMin="0.01504" type="singlebandpseudocolor"> <rasterTransparency/> <rastershader> <colorrampshader colorRampType="INTERPOLATED" clip="0"> <item alpha="255" value="0.01504" label="0.015040" color="#0000ff"/> <item alpha="255" value="0.486765" label="0.486765" color="#02ff00"/> <item alpha="255" value="0.95849" label="0.958490" color="#fffa00"/> <item alpha="255" value="1.44451" label="1.444510" color="#ff0000"/> </colorrampshader> </rastershader> </rasterrenderer> <brightnesscontrast brightness="0" contrast="-6"/> <huesaturation colorizeGreen="128" colorizeOn="0" colorizeRed="255" colorizeBlue="128" grayscaleMode="0" saturation="0" colorizeStrength="100"/> <rasterresampler maxOversampling="2"/> </pipe> <blendMode>0</blendMode> </qgis> """ sst_style = """ <!DOCTYPE qgis PUBLIC 'http://mrcc.com/qgis.dtd' 'SYSTEM'> <qgis version="2.4.0-Chugiak" minimumScale="0" maximumScale="1e+08" hasScaleBasedVisibilityFlag="0"> <pipe> <rasterrenderer opacity="1" alphaBand="-1" classificationMax="29.129" classificationMinMaxOrigin="CumulativeCutFullExtentEstimated" band="1" classificationMin="-1.66383" type="singlebandpseudocolor"> <rasterTransparency/> <rastershader> <colorrampshader colorRampType="INTERPOLATED" clip="0"> <item alpha="255" value="-1.66383" label="-1.663830" color="#2c7bb6"/> <item alpha="255" value="6.03438" label="6.034378" color="#abd9e9"/> <item alpha="255" value="13.7326" label="13.732585" color="#ffffbf"/> <item alpha="255" value="21.4308" label="21.430792" color="#fdae61"/> <item alpha="255" value="29.129" label="29.129000" color="#d7191c"/> </colorrampshader> </rastershader> </rasterrenderer> 
<brightnesscontrast brightness="0" contrast="0"/> <huesaturation colorizeGreen="128" colorizeOn="0" colorizeRed="255" colorizeBlue="128" grayscaleMode="0" saturation="0" colorizeStrength="100"/> <rasterresampler maxOversampling="2"/> </pipe> <blendMode>0</blendMode> </qgis> """ def makeqml(pth, style): f = open('{}/style.qml'.format(pth), 'w') f.write(style) f.close() return '{}/style.qml'.format(pth)
chl_style = '\n<!DOCTYPE qgis PUBLIC \'http://mrcc.com/qgis.dtd\' \'SYSTEM\'>\n<qgis version="2.4.0-Chugiak" minimumScale="0" maximumScale="1e+08" hasScaleBasedVisibilityFlag="0">\n <pipe>\n <rasterrenderer opacity="1" alphaBand="0" classificationMax="1.44451" classificationMinMaxOrigin="CumulativeCutFullExtentEstimated" band="1" classificationMin="0.01504" type="singlebandpseudocolor">\n <rasterTransparency/>\n <rastershader>\n <colorrampshader colorRampType="INTERPOLATED" clip="0">\n <item alpha="255" value="0.01504" label="0.015040" color="#0000ff"/>\n <item alpha="255" value="0.486765" label="0.486765" color="#02ff00"/>\n <item alpha="255" value="0.95849" label="0.958490" color="#fffa00"/>\n <item alpha="255" value="1.44451" label="1.444510" color="#ff0000"/>\n </colorrampshader>\n </rastershader>\n </rasterrenderer>\n <brightnesscontrast brightness="0" contrast="-6"/>\n <huesaturation colorizeGreen="128" colorizeOn="0" colorizeRed="255" colorizeBlue="128" grayscaleMode="0" saturation="0" colorizeStrength="100"/>\n <rasterresampler maxOversampling="2"/>\n </pipe>\n <blendMode>0</blendMode>\n</qgis>\n' sst_style = '\n<!DOCTYPE qgis PUBLIC \'http://mrcc.com/qgis.dtd\' \'SYSTEM\'>\n<qgis version="2.4.0-Chugiak" minimumScale="0" maximumScale="1e+08" hasScaleBasedVisibilityFlag="0">\n <pipe>\n <rasterrenderer opacity="1" alphaBand="-1" classificationMax="29.129" classificationMinMaxOrigin="CumulativeCutFullExtentEstimated" band="1" classificationMin="-1.66383" type="singlebandpseudocolor">\n <rasterTransparency/>\n <rastershader>\n <colorrampshader colorRampType="INTERPOLATED" clip="0">\n <item alpha="255" value="-1.66383" label="-1.663830" color="#2c7bb6"/>\n <item alpha="255" value="6.03438" label="6.034378" color="#abd9e9"/>\n <item alpha="255" value="13.7326" label="13.732585" color="#ffffbf"/>\n <item alpha="255" value="21.4308" label="21.430792" color="#fdae61"/>\n <item alpha="255" value="29.129" label="29.129000" color="#d7191c"/>\n </colorrampshader>\n 
</rastershader>\n </rasterrenderer>\n <brightnesscontrast brightness="0" contrast="0"/>\n <huesaturation colorizeGreen="128" colorizeOn="0" colorizeRed="255" colorizeBlue="128" grayscaleMode="0" saturation="0" colorizeStrength="100"/>\n <rasterresampler maxOversampling="2"/>\n </pipe>\n <blendMode>0</blendMode>\n</qgis>\n' def makeqml(pth, style): f = open('{}/style.qml'.format(pth), 'w') f.write(style) f.close() return '{}/style.qml'.format(pth)
a = 10 b = 20 c = 300000
a = 10 b = 20 c = 300000
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def averageOfLevels(self, root): """ :type root: TreeNode :rtype: List[float] """ if not root: return [] result = [] level = [root] while level: new_level = [] total = 0 for node in level: total += node.val if node.left: new_level.append(node.left) if node.right: new_level.append(node.right) result.append(total/len(level)) level = new_level return result
class Solution: def average_of_levels(self, root): """ :type root: TreeNode :rtype: List[float] """ if not root: return [] result = [] level = [root] while level: new_level = [] total = 0 for node in level: total += node.val if node.left: new_level.append(node.left) if node.right: new_level.append(node.right) result.append(total / len(level)) level = new_level return result
class Solution: def addBinary(self, a: str, b: str) -> str: x=int(a,2) y=int(b,2) return bin(x+y)[2:]
class Solution: def add_binary(self, a: str, b: str) -> str: x = int(a, 2) y = int(b, 2) return bin(x + y)[2:]
for i in range(int(input())): n = int(input()) s = input() a,b = [-1 for j in range(26)],[1e5 for j in range(26)] for j in range(n): if(a[ord(s[j])-97]==-1): a[ord(s[j])-97] = j else: if(j-a[ord(s[j])-97]<b[ord(s[j])-97]): b[ord(s[j])-97] = j-a[ord(s[j])-97] a[ord(s[j])-97] = j print(max(n-min(b),0))
for i in range(int(input())): n = int(input()) s = input() (a, b) = ([-1 for j in range(26)], [100000.0 for j in range(26)]) for j in range(n): if a[ord(s[j]) - 97] == -1: a[ord(s[j]) - 97] = j else: if j - a[ord(s[j]) - 97] < b[ord(s[j]) - 97]: b[ord(s[j]) - 97] = j - a[ord(s[j]) - 97] a[ord(s[j]) - 97] = j print(max(n - min(b), 0))
class Solution: def minDistance(self, word1: str, word2: str) -> int: edits = [[x for x in range(len(word1) + 1)] for y in range(len(word2) + 1)] for i in range(1, len(word2) + 1): edits[i][0] = edits[i - 1][0] + 1 for i in range(1, len(word2) + 1): for j in range(1, len(word1) + 1): if word2[i - 1] == word1[j - 1]: edits[i][j] = edits[i - 1][j - 1] else: edits[i][j] = 1 + min(edits[i][j - 1], edits[i - 1][j - 1], edits[i - 1][j]) return edits[-1][-1]
class Solution: def min_distance(self, word1: str, word2: str) -> int: edits = [[x for x in range(len(word1) + 1)] for y in range(len(word2) + 1)] for i in range(1, len(word2) + 1): edits[i][0] = edits[i - 1][0] + 1 for i in range(1, len(word2) + 1): for j in range(1, len(word1) + 1): if word2[i - 1] == word1[j - 1]: edits[i][j] = edits[i - 1][j - 1] else: edits[i][j] = 1 + min(edits[i][j - 1], edits[i - 1][j - 1], edits[i - 1][j]) return edits[-1][-1]
ENHANCED_PEER_MOBILIZATION = 'enhanced_peer_mobilization' CHAMP_CAMEROON = 'champ_client_forms' TARGET_XMLNS = 'http://openrosa.org/formdesigner/A79467FD-4CDE-47B6-8218-4394699A5C95' PREVENTION_XMLNS = 'http://openrosa.org/formdesigner/DF2FBEEA-31DE-4537-9913-07D57591502C' POST_TEST_XMLNS = 'http://openrosa.org/formdesigner/E2B4FD32-9A62-4AE8-AAB0-0CE4B8C28AA1' ACCOMPAGNEMENT_XMLNS = 'http://openrosa.org/formdesigner/027DEB76-422B-434C-9F53-ECBBE21F890F' SUIVI_MEDICAL_XMLNS = 'http://openrosa.org/formdesigner/7C8BC256-0E79-4A96-9ECB-D6D8C50CD69E'
enhanced_peer_mobilization = 'enhanced_peer_mobilization' champ_cameroon = 'champ_client_forms' target_xmlns = 'http://openrosa.org/formdesigner/A79467FD-4CDE-47B6-8218-4394699A5C95' prevention_xmlns = 'http://openrosa.org/formdesigner/DF2FBEEA-31DE-4537-9913-07D57591502C' post_test_xmlns = 'http://openrosa.org/formdesigner/E2B4FD32-9A62-4AE8-AAB0-0CE4B8C28AA1' accompagnement_xmlns = 'http://openrosa.org/formdesigner/027DEB76-422B-434C-9F53-ECBBE21F890F' suivi_medical_xmlns = 'http://openrosa.org/formdesigner/7C8BC256-0E79-4A96-9ECB-D6D8C50CD69E'
def attack(state, attacking_moves_sequence): new_state = state.__deepcopy__() ATTACKING_TERRITORY_INDEX = 0 ENEMY_TERRITORY_INDEX = 1 for attacking_move in attacking_moves_sequence: print("in attack attacking move",attacking_move) print("new state map in attack", len(new_state.map)) print("enemy",attacking_move[ENEMY_TERRITORY_INDEX].territory_name) attacking_territory = new_state.get_territory(attacking_move[ATTACKING_TERRITORY_INDEX]) enemy_territory = new_state.get_territory(attacking_move[ENEMY_TERRITORY_INDEX]) # print("in attack attacking and enemy",attacking_territory, enemy_territory ) # print("in attack ", len(new_state.get_owned_territories(attacking_territory.owner)), attacking_territory.number_of_armies, enemy_territory.number_of_armies) # print("attacking army before ",attacking_territory.number_of_armies) # print("enemy army before ",enemy_territory.number_of_armies) attacking_territory.number_of_armies -= 1 enemy_territory.number_of_armies = 1 if attacking_territory.number_of_armies <= 0 or enemy_territory.number_of_armies <= 0 : raise(ValueError("Not valid attack move sequence")) enemy_territory.owner = attacking_territory.owner print("owner",enemy_territory.owner, attacking_territory.owner) print("owned territories after attacking",len(new_state.get_owned_territories(attacking_territory.owner))) return new_state def reinforce_territory(state, territory, additional_armies): """ Used to add reinforcement armies to a territory. Args: territory: The territory that will have the reinforcement. additional_armies: the armies which will be added to the territory. """ keys = state.adjacency_list.keys() for key in keys: if key.territory_name == territory.territory_name: key.number_of_armies += additional_armies return
def attack(state, attacking_moves_sequence):
    """Apply a sequence of (attacker, defender) territory moves to a copy of `state`.

    Each move costs the attacker one army, leaves one army on the conquered
    territory, and transfers ownership to the attacker.

    Raises:
        ValueError: if any move would leave either territory with <= 0 armies.
    """
    new_state = state.__deepcopy__()
    # Tuple layout of each move: (attacking territory, enemy territory).
    attacking_territory_index = 0
    enemy_territory_index = 1
    for attacking_move in attacking_moves_sequence:
        print('in attack attacking move', attacking_move)
        print('new state map in attack', len(new_state.map))
        # Bug fix: the renamed lowercase index locals must be used here; the
        # old UPPER_CASE names no longer exist and raised NameError.
        print('enemy', attacking_move[enemy_territory_index].territory_name)
        attacking_territory = new_state.get_territory(attacking_move[attacking_territory_index])
        enemy_territory = new_state.get_territory(attacking_move[enemy_territory_index])
        attacking_territory.number_of_armies -= 1
        enemy_territory.number_of_armies = 1
        if attacking_territory.number_of_armies <= 0 or enemy_territory.number_of_armies <= 0:
            # Bug fix: raise the builtin ValueError (was `value_error`, undefined).
            raise ValueError('Not valid attack move sequence')
        enemy_territory.owner = attacking_territory.owner
        print('owner', enemy_territory.owner, attacking_territory.owner)
        print('owned territories after attacking', len(new_state.get_owned_territories(attacking_territory.owner)))
    return new_state

def reinforce_territory(state, territory, additional_armies):
    """
    Used to add reinforcement armies to a territory.

    Args:
        territory: The territory that will have the reinforcement.
        additional_armies: the armies which will be added to the territory.
    """
    # Match by name: `territory` may be a different object than the graph key.
    keys = state.adjacency_list.keys()
    for key in keys:
        if key.territory_name == territory.territory_name:
            key.number_of_armies += additional_armies
    return
# You may need this if you really want to use a recursive solution! # It raises the cap on how many recursions can happen. Use this at your own risk! # sys.setrecursionlimit(100000) def read_lines(filename): try: f = open(filename,"r") lines = f.readlines() f.close() filtered_contents = [] for line in lines: line = line.rstrip("\n") filtered_contents.append(list(line)) #read lines converted to cells objects in list of lists return filtered_contents except FileNotFoundError: print("{} does not exist!".format(filename)) exit() def some(grid): i = 0 while i < len(grid): k = 0 while k < len(grid[i]): if (grid[i][k] == "X" ): #Find X row = int(i) col = int(k) return row,col k += 1 i += 1 def solve(filename): grid = read_lines(filename) start_pos = some(grid) x = start_pos[0] y = start_pos[1] ls = ["u","d","r","l"] pass # Base Cases # # if (x,y outside maze) return false # # if (x,y is goal) return true # # if (x,y is wall) return false # # if (x,y is air) return true def solve(mode): pass if __name__ == "__main__": solution_found = False if solution_found: pass # Print your solution... else: print("There is no possible path.")
def read_lines(filename): try: f = open(filename, 'r') lines = f.readlines() f.close() filtered_contents = [] for line in lines: line = line.rstrip('\n') filtered_contents.append(list(line)) return filtered_contents except FileNotFoundError: print('{} does not exist!'.format(filename)) exit() def some(grid): i = 0 while i < len(grid): k = 0 while k < len(grid[i]): if grid[i][k] == 'X': row = int(i) col = int(k) return (row, col) k += 1 i += 1 def solve(filename): grid = read_lines(filename) start_pos = some(grid) x = start_pos[0] y = start_pos[1] ls = ['u', 'd', 'r', 'l'] pass def solve(mode): pass if __name__ == '__main__': solution_found = False if solution_found: pass else: print('There is no possible path.')
class MultiFilterCNNModel(object): def __init__(self, max_sequences, word_index, embed_dim, embedding_matrix, filter_sizes, num_filters, dropout, learning_rate, beta_1, beta_2, epsilon, n_classes): sequence_input = Input(shape=(max_sequences,), dtype='int32') embedding_layer = Embedding(len(word_index)+1, embed_dim, input_length=max_sequences, weights=[embedding_matrix], trainable=True, mask_zero=False)(sequence_input) filter_sizes = filter_sizes.split(',') #embedding_layer_ex = K.expand_dims(embedding_layer, ) #embedding_layer_ex = Reshape()(embedding_layer) conv_0 = Conv1D(num_filters, int(filter_sizes[0]), padding='valid', kernel_initializer='normal', activation='relu')(embedding_layer) conv_1 = Conv1D(num_filters, int(filter_sizes[1]), padding='valid', kernel_initializer='normal', activation='relu')(embedding_layer) conv_2 = Conv1D(num_filters, int(filter_sizes[2]), padding='valid', kernel_initializer='normal', activation='relu')(embedding_layer) maxpool_0 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[0]) + 1, strides=1, padding='valid')(conv_0) maxpool_1 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[1]) + 1, strides=1, padding='valid')(conv_1) maxpool_2 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[2]) + 1, strides=1, padding='valid')(conv_2) merged_tensor = merge([maxpool_0, maxpool_1, maxpool_2], mode='concat', concat_axis=1) flatten = Flatten()(merged_tensor) # average_pooling = AveragePooling2D(pool_size=(sequence_length,1),strides=(1,1), # border_mode='valid', dim_ordering='tf')(inputs) # # reshape = Reshape()(average_pooling) #reshape = Reshape((3*num_filters,))(merged_tensor) dropout_layer = Dropout(dropout)(flatten) softmax_layer = Dense(output_dim=n_classes, activation='softmax')(dropout_layer) # this creates a model that includes model = Model(inputs=sequence_input, outputs=softmax_layer) adam = Adam(lr=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon) #sgd = optimizers.SGD(lr=0.001, decay=1e-5, 
momentum=0.9, nesterov=True) model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=["accuracy"]) self.model = model
class Multifiltercnnmodel(object):
    """Keras text classifier: embedding -> three parallel Conv1D branches
    (one per filter size) -> max-pool -> concat -> dropout -> softmax.

    Bug fix: the Keras API names (Input, Embedding, Conv1D, MaxPooling1D,
    Flatten, Dropout, Dense, Model, Adam, merge) had been mechanically
    lowercased into undefined/self-shadowing names; restored here.
    NOTE(review): assumes these names are imported from Keras at module
    level (outside this excerpt) -- confirm against the file's imports.
    """

    def __init__(self, max_sequences, word_index, embed_dim, embedding_matrix,
                 filter_sizes, num_filters, dropout, learning_rate,
                 beta_1, beta_2, epsilon, n_classes):
        sequence_input = Input(shape=(max_sequences,), dtype='int32')
        # +1 because word indices are 1-based; weights come pre-trained.
        embedding_layer = Embedding(len(word_index) + 1, embed_dim,
                                    input_length=max_sequences,
                                    weights=[embedding_matrix],
                                    trainable=True, mask_zero=False)(sequence_input)
        # filter_sizes arrives as a comma-separated string, e.g. "3,4,5".
        filter_sizes = filter_sizes.split(',')
        conv_0 = Conv1D(num_filters, int(filter_sizes[0]), padding='valid',
                        kernel_initializer='normal', activation='relu')(embedding_layer)
        conv_1 = Conv1D(num_filters, int(filter_sizes[1]), padding='valid',
                        kernel_initializer='normal', activation='relu')(embedding_layer)
        conv_2 = Conv1D(num_filters, int(filter_sizes[2]), padding='valid',
                        kernel_initializer='normal', activation='relu')(embedding_layer)
        # Pool each branch down to a single feature vector per filter.
        maxpool_0 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[0]) + 1,
                                 strides=1, padding='valid')(conv_0)
        maxpool_1 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[1]) + 1,
                                 strides=1, padding='valid')(conv_1)
        maxpool_2 = MaxPooling1D(pool_size=max_sequences - int(filter_sizes[2]) + 1,
                                 strides=1, padding='valid')(conv_2)
        merged_tensor = merge([maxpool_0, maxpool_1, maxpool_2],
                              mode='concat', concat_axis=1)
        # Renamed local (was `flatten`) so it no longer shadows the Flatten layer class.
        flattened = Flatten()(merged_tensor)
        dropout_layer = Dropout(dropout)(flattened)
        softmax_layer = Dense(output_dim=n_classes, activation='softmax')(dropout_layer)
        model = Model(inputs=sequence_input, outputs=softmax_layer)
        adam = Adam(lr=learning_rate, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
        # Bug fix: compile with the configured Adam instance; passing the
        # string "adam" silently discarded learning_rate/beta_1/beta_2/epsilon.
        model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
        self.model = model
#!usr/bin/python # -*- coding: utf-8 -*- class Database(object): """Database class holding information about node/label relationships, mappings from nodes to images and from images to status.""" def __init__(self, root_name="core"): """Constuctor for the Database class. Args: root_name (str, optional): Name given to the reference abstract category. Defaults to "core". """ # Create the graph as a dict naming descendants of each node self.graph = {root_name: set()} # Mapping between each node/label and the images referencing it self.nodes_images = {root_name: set()} # Dict holding the current status of each image self.images_status = dict() def add_nodes(self, nodes): """Method to add nodes to the Database graph, given a parent-child relationship. Args: nodes (list): List of tuples describing nodes and their parent, to be added to the graph. """ for (name, parent) in nodes: """Possible improvements here: - Introduce a hard check that each parent must exist - Pre-sort the node list for dependencies / find the source(s) node(s) + ordering in a DAG. e.g. [("core", None), ("A1", "A"), ("A", "core")] wouldn't be an issue. """ # Notify the parent node that the granularity has changed self.notify_granularity(parent) # Notify sibling nodes (parent's other children) that the coverage has changed for sibling in self.graph[parent]: self.notify_coverage(sibling) # Formally add node to the graph (as itself + child of its parent) and create entry in mapping self.graph[parent].add(name) self.graph[name] = set() self.nodes_images[name] = set() def add_extract(self, images): """Method to add image labeling information in the Database. Args: images (dict): Mappings of each image to one or multiple labels/nodes. """ for image, nodes in images.items(): # Flag keeping track of whether the current image has referenced only known nodes so far. valid_nodes = True """Nested loop for this reason: we need to update the status of an image if it references an invalid node. 
Therefore we need to iterate both on images and nodes. """ for node in nodes: """ From the description, it is not explicit whether invalid references to non-existent nodes should be kept. e.g. if `img003` references node `E` that is not in the graph, should this reference be kept in case node `E` is added later on. Here we choose not to keep it, however in the opposite case, we can easily move the content of the if-statement outside. """ if node in self.graph: self.nodes_images[node].add(image) else: # Report that an invalid reference was found for this image valid_nodes = False # Update the image status accordingly self.images_status[image] = "valid" if valid_nodes else "invalid" def get_extract_status(self): """Retrieve the status associated to each image, which was maintained on the fly as we updated the Database. Returns: [dict]: A dictionary of image names and their associated status. """ return self.images_status def notify_granularity(self, node): """Helper method allowing to change the status of an image if there was an extension in granularity (only on valid images, no precendence over coverage changes). Args: node (string): Name of the node whose status needs an update """ for image in self.nodes_images[node]: if self.images_status[image] == "valid": self.images_status[image] = "granularity_staged" def notify_coverage(self, node): """Helper method allowing to change the status of an image if there was an extension in coverage (only on valid images or images that have had their granularity changed). Args: node (string): Name of the node whose status needs an update """ for image in self.nodes_images[node]: if self.images_status[image] in ["valid", "granularity_staged"]: self.images_status[image] = "coverage_staged"
class Database(object):
    """Database holding node/label parent-child relationships, a mapping from
    nodes to the images referencing them, and the current status of each image.

    Fixed: explanatory notes had degraded into bare string-literal statements
    (one evaluated inside the add_nodes loop on every iteration); restored as
    comments/docstrings.
    """

    def __init__(self, root_name='core'):
        """Constructor for the Database class.

        Args:
            root_name (str, optional): Name given to the reference abstract
                category. Defaults to "core".
        """
        # Adjacency: node name -> set of names of its direct children.
        self.graph = {root_name: set()}
        # Node name -> set of images referencing it.
        self.nodes_images = {root_name: set()}
        # Image name -> "valid" / "invalid" / "granularity_staged" / "coverage_staged".
        self.images_status = dict()

    def add_nodes(self, nodes):
        """Add nodes to the graph, given (name, parent) relationships.

        Possible improvements (kept from the original notes):
          - introduce a hard check that each parent must exist;
          - pre-sort the node list for dependencies / DAG source ordering.

        Args:
            nodes (list): Tuples (name, parent) to be added to the graph.
        """
        for name, parent in nodes:
            # The parent gains a child: its granularity has changed.
            self.notify_granularity(parent)
            # The parent's existing children gain a sibling: coverage changed.
            for sibling in self.graph[parent]:
                self.notify_coverage(sibling)
            # Formally register the node (as itself + child of its parent).
            self.graph[parent].add(name)
            self.graph[name] = set()
            self.nodes_images[name] = set()

    def add_extract(self, images):
        """Record image -> labels information in the Database.

        References to unknown nodes are dropped (not retained in case the node
        is added later) and mark the whole image "invalid".

        Args:
            images (dict): Mapping of each image to one or multiple nodes.
        """
        for image, nodes in images.items():
            # True while every node referenced by this image is known.
            valid_nodes = True
            for node in nodes:
                if node in self.graph:
                    self.nodes_images[node].add(image)
                else:
                    valid_nodes = False
            self.images_status[image] = 'valid' if valid_nodes else 'invalid'

    def get_extract_status(self):
        """Return the dict of image name -> status, maintained on the fly."""
        return self.images_status

    def notify_granularity(self, node):
        """Mark `node`'s images "granularity_staged" — only those currently
        "valid" (no precedence over coverage changes)."""
        for image in self.nodes_images[node]:
            if self.images_status[image] == 'valid':
                self.images_status[image] = 'granularity_staged'

    def notify_coverage(self, node):
        """Mark `node`'s images "coverage_staged" — only those currently
        "valid" or "granularity_staged"."""
        for image in self.nodes_images[node]:
            if self.images_status[image] in ['valid', 'granularity_staged']:
                self.images_status[image] = 'coverage_staged'
# MMSegmentation config: PSPNet (ResNet-50 v1c) on the custom CardDataset.
# Consistency fix: the norm/img-norm/crop-size/pipeline values were duplicated
# inline everywhere; they now reference the variables declared once at the
# top (the upstream mmseg config convention), so they cannot drift apart.
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='PSPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        pool_scales=(1, 2, 3, 6),
        dropout_ratio=0.1,
        num_classes=6,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=6,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'CardDataset'
data_root = '../card_dataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(500, 500), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(500, 500),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=train_pipeline,
        split='splits/train.txt'),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=test_pipeline,
        split='splits/val.txt'),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=test_pipeline,
        split='splits/val.txt'))
log_config = dict(
    interval=10, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
# optimizer=dict(
#     paramwise_cfg = dict(
#         custom_keys={
#             'head': dict(lr_mult=10.)}))
optimizer_config = dict(type='OptimizerHook')
lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=200)
checkpoint_config = dict(by_epoch=False, interval=200, type='CheckpointHook')
evaluation = dict(interval=200, metric='mIoU', by_epoch=False)
work_dir = './work_dirs/tutorial'
seed = 0
gpu_ids = range(0, 1)
# MMSegmentation config: PSPNet (ResNet-50 v1c) on the custom CardDataset.
# Consistency fix: duplicated inline literals now reference the variables
# declared once at the top (norm_cfg, img_norm_cfg, crop_size, pipelines),
# following the upstream mmseg config convention, so values cannot drift.
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='PSPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        pool_scales=(1, 2, 3, 6),
        dropout_ratio=0.1,
        num_classes=6,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=6,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'CardDataset'
data_root = '../card_dataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(500, 500), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(500, 500),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=train_pipeline,
        split='splits/train.txt'),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=test_pipeline,
        split='splits/val.txt'),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images',
        ann_dir='labels',
        pipeline=test_pipeline,
        split='splits/val.txt'))
log_config = dict(
    interval=10, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(type='OptimizerHook')
lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=200)
checkpoint_config = dict(by_epoch=False, interval=200, type='CheckpointHook')
evaluation = dict(interval=200, metric='mIoU', by_epoch=False)
work_dir = './work_dirs/tutorial'
seed = 0
gpu_ids = range(0, 1)
class Solution: def longestAwesome(self, s: str) -> int: table = {0 : -1} mask = maxLen = 0 for i, c in enumerate(s): mask ^= (1 << int(c)) for j in range(10): mask2 = mask ^ (1 << j) if mask2 in table: maxLen = max(maxLen, i - table[mask2]) if mask in table: maxLen = max(maxLen, i - table[mask]) else: table[mask] = i return maxLen
class Solution: def longest_awesome(self, s: str) -> int: table = {0: -1} mask = max_len = 0 for (i, c) in enumerate(s): mask ^= 1 << int(c) for j in range(10): mask2 = mask ^ 1 << j if mask2 in table: max_len = max(maxLen, i - table[mask2]) if mask in table: max_len = max(maxLen, i - table[mask]) else: table[mask] = i return maxLen
# Tensors as produced by TextField.as_tensor with a SingleIdTokenIndexer and a
# TokenCharactersIndexer (see the snippet above); this time more intuitive
# names are used for the indexers and embedders.
token_tensor = {
    'tokens': {'tokens': torch.LongTensor([[2, 4, 3, 5]])},
    'token_characters': {'token_characters': torch.LongTensor(
        [[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]])}
}

# Embeds each whole-token id.
embedding = Embedding(num_embeddings=6, embedding_dim=3)

# Encodes the characters in each token: embed the chars, then pool with a CNN.
character_embedding = Embedding(num_embeddings=6, embedding_dim=3)
cnn_encoder = CnnEncoder(embedding_dim=3, num_filters=4, ngram_filter_sizes=[3])
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)

embedder = BasicTextFieldEmbedder(
    token_embedders={'tokens': embedding, 'token_characters': token_encoder})

embedded_tokens = embedder(token_tensor)
print(embedded_tokens)

# As produced by TextField.as_tensor with a SingleIdTokenIndexer, a
# TokenCharactersIndexer, and another SingleIdTokenIndexer for PoS tags
# (see the code above).
token_tensor = {
    'tokens': {'tokens': torch.LongTensor([[2, 4, 3, 5]])},
    'token_characters': {'token_characters': torch.LongTensor(
        [[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]])},
    'pos_tag_tokens': {'tokens': torch.tensor([[2, 5, 3, 4]])}
}

vocab = Vocabulary()
vocab.add_tokens_to_namespace(
    ['This', 'is', 'some', 'text', '.'], namespace='token_vocab')
vocab.add_tokens_to_namespace(
    ['T', 'h', 'i', 's', ' ', 'o', 'm', 'e', 't', 'x', '.'],
    namespace='character_vocab')
vocab.add_tokens_to_namespace(
    ['DT', 'VBZ', 'NN', '.'], namespace='pos_tag_vocab')

# 'vocab_namespace' matches the namespaces registered above. This mirrors
# construction from a configuration file, where the vocabulary is passed in
# behind the scenes (but vocab_namespace must be set in the config). With a
# `build_model` method (see the quick start chapter), or when instantiating
# the Embedding directly, you can instead grab the vocab size yourself and
# pass num_embeddings, as done above.
embedding = Embedding(embedding_dim=3, vocab_namespace='token_vocab', vocab=vocab)

# Character-level encoder for each token.
character_embedding = Embedding(
    embedding_dim=4, vocab_namespace='character_vocab', vocab=vocab)
cnn_encoder = CnnEncoder(embedding_dim=4, num_filters=5, ngram_filter_sizes=[3])
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)

# Embeds the part-of-speech tag of each token.
pos_tag_embedding = Embedding(
    embedding_dim=6, vocab_namespace='pos_tag_vocab', vocab=vocab)

# These keys match the keys in token_tensor above — the same keys given to the
# TokenIndexers when constructing TextFields in the DatasetReader.
embedder = BasicTextFieldEmbedder(
    token_embedders={'tokens': embedding,
                     'token_characters': token_encoder,
                     'pos_tag_tokens': pos_tag_embedding})

embedded_tokens = embedder(token_tensor)
print(embedded_tokens)
token_tensor = {'tokens': {'tokens': torch.LongTensor([[2, 4, 3, 5]])}, 'token_characters': {'token_characters': torch.LongTensor([[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]])}} embedding = embedding(num_embeddings=6, embedding_dim=3) character_embedding = embedding(num_embeddings=6, embedding_dim=3) cnn_encoder = cnn_encoder(embedding_dim=3, num_filters=4, ngram_filter_sizes=[3]) token_encoder = token_characters_encoder(character_embedding, cnn_encoder) embedder = basic_text_field_embedder(token_embedders={'tokens': embedding, 'token_characters': token_encoder}) embedded_tokens = embedder(token_tensor) print(embedded_tokens) token_tensor = {'tokens': {'tokens': torch.LongTensor([[2, 4, 3, 5]])}, 'token_characters': {'token_characters': torch.LongTensor([[[2, 5, 3], [4, 0, 0], [2, 1, 4], [5, 4, 0]]])}, 'pos_tag_tokens': {'tokens': torch.tensor([[2, 5, 3, 4]])}} vocab = vocabulary() vocab.add_tokens_to_namespace(['This', 'is', 'some', 'text', '.'], namespace='token_vocab') vocab.add_tokens_to_namespace(['T', 'h', 'i', 's', ' ', 'o', 'm', 'e', 't', 'x', '.'], namespace='character_vocab') vocab.add_tokens_to_namespace(['DT', 'VBZ', 'NN', '.'], namespace='pos_tag_vocab') embedding = embedding(embedding_dim=3, vocab_namespace='token_vocab', vocab=vocab) character_embedding = embedding(embedding_dim=4, vocab_namespace='character_vocab', vocab=vocab) cnn_encoder = cnn_encoder(embedding_dim=4, num_filters=5, ngram_filter_sizes=[3]) token_encoder = token_characters_encoder(character_embedding, cnn_encoder) pos_tag_embedding = embedding(embedding_dim=6, vocab_namespace='pos_tag_vocab', vocab=vocab) embedder = basic_text_field_embedder(token_embedders={'tokens': embedding, 'token_characters': token_encoder, 'pos_tag_tokens': pos_tag_embedding}) embedded_tokens = embedder(token_tensor) print(embedded_tokens)
{ "targets": [ { "target_name": "decompressor", "sources": ["decompressor.cpp"] }, { "target_name": "pow_solver", "sources": ["pow_solver.cpp"] } ] }
{'targets': [{'target_name': 'decompressor', 'sources': ['decompressor.cpp']}, {'target_name': 'pow_solver', 'sources': ['pow_solver.cpp']}]}
class C4FileIO(): red_wins = 0 blue_wins = 0 def __init__(self): try: f = open("c4wins.txt", "r") lines = f.readlines() f.close() if type(lines) == list: self.deserialize_file(lines) except FileNotFoundError: print("File not found, writing new file.") def deserialize_file(self, lines): for line in lines: line = line.strip("\n") w = line.split(",") if w[0] == "red_wins": self.red_wins = int(w[1]) elif w[0] == "blue_wins": self.blue_wins = int(w[1]) def serialize_file(self): f = open("c4wins.txt", "w") f_str = "" f_str += "red_wins," + str(self.red_wins) + " \n" f_str += "blue_wins," + str(self.blue_wins) + " \n" print(f_str) f.write(f_str) f.close()
class C4Fileio: red_wins = 0 blue_wins = 0 def __init__(self): try: f = open('c4wins.txt', 'r') lines = f.readlines() f.close() if type(lines) == list: self.deserialize_file(lines) except FileNotFoundError: print('File not found, writing new file.') def deserialize_file(self, lines): for line in lines: line = line.strip('\n') w = line.split(',') if w[0] == 'red_wins': self.red_wins = int(w[1]) elif w[0] == 'blue_wins': self.blue_wins = int(w[1]) def serialize_file(self): f = open('c4wins.txt', 'w') f_str = '' f_str += 'red_wins,' + str(self.red_wins) + ' \n' f_str += 'blue_wins,' + str(self.blue_wins) + ' \n' print(f_str) f.write(f_str) f.close()
# Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 load("//bazel_tools:haskell.bzl", "da_haskell_test") load("//bazel_tools/sh:sh.bzl", "sh_inline_test") def damlc_compile_test( name, srcs, main, damlc = "//compiler/damlc", stack_limit = "", heap_limit = "", **kwargs): stack_opt = "-K" + stack_limit if stack_limit else "" heap_opt = "-M" + heap_limit if heap_limit else "" sh_inline_test( name = name, data = [damlc, main] + srcs, cmd = """\ DAMLC=$$(canonicalize_rlocation $(rootpath {damlc})) MAIN=$$(canonicalize_rlocation $(rootpath {main})) TMP=$$(mktemp -d) function cleanup() {{ rm -rf "$$TMP" }} trap cleanup EXIT $$DAMLC compile $$MAIN -o $$TMP/out +RTS -s {stack_opt} {heap_opt} """.format( damlc = damlc, main = main, stack_opt = stack_opt, heap_opt = heap_opt, ), **kwargs )
load('//bazel_tools:haskell.bzl', 'da_haskell_test') load('//bazel_tools/sh:sh.bzl', 'sh_inline_test') def damlc_compile_test(name, srcs, main, damlc='//compiler/damlc', stack_limit='', heap_limit='', **kwargs): stack_opt = '-K' + stack_limit if stack_limit else '' heap_opt = '-M' + heap_limit if heap_limit else '' sh_inline_test(name=name, data=[damlc, main] + srcs, cmd='DAMLC=$$(canonicalize_rlocation $(rootpath {damlc}))\nMAIN=$$(canonicalize_rlocation $(rootpath {main}))\n\nTMP=$$(mktemp -d)\nfunction cleanup() {{\n rm -rf "$$TMP"\n}}\ntrap cleanup EXIT\n\n$$DAMLC compile $$MAIN -o $$TMP/out +RTS -s {stack_opt} {heap_opt}\n'.format(damlc=damlc, main=main, stack_opt=stack_opt, heap_opt=heap_opt), **kwargs)
class InfiniteLineup: def __init__(self, players): self.players = players def lineup(self): lineup_max = len(self.players) idx = 0 while True: if idx < lineup_max: yield self.players[idx] else: idx = 0 yield self.players[idx] idx += 1 def __repr__(self): return f'<InfiniteLineup({self.players})' def __str__(self): return f'InfiniteLineup with the players: {self.players}' astros = [ 'Springer', 'Bregman', 'Altuve', 'Correa', 'Reddick', 'Gonzalez', 'McCann', 'Davis', 'Tucker' ] full_lineup = InfiniteLineup(astros) astros_lineup = full_lineup.lineup() print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup))
class Infinitelineup: def __init__(self, players): self.players = players def lineup(self): lineup_max = len(self.players) idx = 0 while True: if idx < lineup_max: yield self.players[idx] else: idx = 0 yield self.players[idx] idx += 1 def __repr__(self): return f'<InfiniteLineup({self.players})' def __str__(self): return f'InfiniteLineup with the players: {self.players}' astros = ['Springer', 'Bregman', 'Altuve', 'Correa', 'Reddick', 'Gonzalez', 'McCann', 'Davis', 'Tucker'] full_lineup = infinite_lineup(astros) astros_lineup = full_lineup.lineup() print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup)) print(next(astros_lineup))
class StoreObject: def _log_template(self, token): return f"<{token.server.upper()} {token.method}: {' '.join([str(token.params[k]) for k in token.params])}>" async def get(self, token, *args, **kwargs): raise NotImplementedError async def set(self, token, response, *args, **kwargs): raise NotImplementedError async def post(self, token, body, *args, **kwargs): raise NotImplementedError async def put(self, token, body, *args, **kwargs): raise NotImplementedError async def clear(self, *args, **kwargs): raise NotImplementedError async def expire(self, *args, **kwargs): raise NotImplementedError async def delete(self, token, *args, **kwargs): raise NotImplementedError async def contains(self, token, *args, **kwargs): raise NotImplementedError @property def classname(self): return self.__class__.__name__
class Storeobject: def _log_template(self, token): return f"<{token.server.upper()} {token.method}: {' '.join([str(token.params[k]) for k in token.params])}>" async def get(self, token, *args, **kwargs): raise NotImplementedError async def set(self, token, response, *args, **kwargs): raise NotImplementedError async def post(self, token, body, *args, **kwargs): raise NotImplementedError async def put(self, token, body, *args, **kwargs): raise NotImplementedError async def clear(self, *args, **kwargs): raise NotImplementedError async def expire(self, *args, **kwargs): raise NotImplementedError async def delete(self, token, *args, **kwargs): raise NotImplementedError async def contains(self, token, *args, **kwargs): raise NotImplementedError @property def classname(self): return self.__class__.__name__
# ------------------------------ # 674. Longest Continuous Increasing Subsequence # # Description: # Given an unsorted array of integers, find the length of longest continuous increasing subsequence (subarray). # Example 1: # Input: [1,3,5,4,7] # Output: 3 # Explanation: The longest continuous increasing subsequence is [1,3,5], its length is 3. # Even though [1,3,5,7] is also an increasing subsequence, it's not a continuous one where 5 and 7 are separated by 4. # # Example 2: # Input: [2,2,2,2,2] # Output: 1 # Explanation: The longest continuous increasing subsequence is [2], its length is 1. # # Note: Length of the array will not exceed 10,000. # Version: 1.0 # 08/03/18 by Jianfa # ------------------------------ class Solution(object): def findLengthOfLCIS(self, nums): """ :type nums: List[int] :rtype: int """ maxlen = 1 curlen = 1 if len(nums) == 0 or len(nums) == 1: return len(nums) for i in range(len(nums) - 1): if nums[i] < nums[i+1]: curlen += 1 else: maxlen = max(maxlen, curlen) curlen = 1 maxlen = max(maxlen, curlen) return maxlen # Used for testing if __name__ == "__main__": test = Solution() # ------------------------------ # Summary: #
class Solution(object): def find_length_of_lcis(self, nums): """ :type nums: List[int] :rtype: int """ maxlen = 1 curlen = 1 if len(nums) == 0 or len(nums) == 1: return len(nums) for i in range(len(nums) - 1): if nums[i] < nums[i + 1]: curlen += 1 else: maxlen = max(maxlen, curlen) curlen = 1 maxlen = max(maxlen, curlen) return maxlen if __name__ == '__main__': test = solution()
""" Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set. get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1. set(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item. TLE """ class LRUCache: # @param capacity, an integer def __init__(self, capacity): self.capacity = capacity self.times = {} self.cache = {} self.timestamp = 0 # @return an integer def get(self, key): self.timestamp += 1 if key in self.cache: self.times[key] = self.timestamp return self.cache[key] return -1 # @param key, an integer # @param value, an integer # @return nothing def set(self, key, value): self.timestamp += 1 if key in self.cache: self.times[key] = self.timestamp else: if len(self.cache) >= self.capacity: lru_key = self.get_lru_key() del self.cache[lru_key] del self.times[lru_key] self.cache[key] = value self.times[key] = self.timestamp def get_lru_key(self): min_time = self.timestamp res = None for key in self.times: if self.times[key] <= min_time: res = key min_time = self.times[key] return res
""" Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set. get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1. set(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item. TLE """ class Lrucache: def __init__(self, capacity): self.capacity = capacity self.times = {} self.cache = {} self.timestamp = 0 def get(self, key): self.timestamp += 1 if key in self.cache: self.times[key] = self.timestamp return self.cache[key] return -1 def set(self, key, value): self.timestamp += 1 if key in self.cache: self.times[key] = self.timestamp else: if len(self.cache) >= self.capacity: lru_key = self.get_lru_key() del self.cache[lru_key] del self.times[lru_key] self.cache[key] = value self.times[key] = self.timestamp def get_lru_key(self): min_time = self.timestamp res = None for key in self.times: if self.times[key] <= min_time: res = key min_time = self.times[key] return res
"""grating_coupler_meep - grating coupler meep""" __version__ = "0.0.1" __author__ = "Simon Bilodeau <<46427609+simbilod@users.noreply.github.com>>" __all__ = []
"""grating_coupler_meep - grating coupler meep""" __version__ = '0.0.1' __author__ = 'Simon Bilodeau <<46427609+simbilod@users.noreply.github.com>>' __all__ = []
class SIR:
    """S/I/R (susceptible/infected/removed) compartment counts for a region."""

    def __init__(self, susceptible, infected, removed, region_id=None):
        self.region_id = region_id
        self.susceptible = susceptible
        self.infected = infected
        self.removed = removed
        self.total_pop = susceptible + infected + removed

    def copy(self):
        """Return a new SIR with the same compartment counts.

        NOTE(review): region_id is not carried over (matches the original
        behaviour) — confirm this is intended.
        """
        return SIR(self.susceptible, self.infected, self.removed)

    def __str__(self):
        return "SIR(S={susceptible}, I={infected}, R={removed})".format(
            susceptible=self.susceptible,
            infected=self.infected,
            removed=self.removed
        )

    # defines self == other
    def __eq__(self, other):
        # Bug fix: the original returned `isinstance(...) and (a == b, c == d,
        # e == f)` — a non-empty tuple, which is always truthy, so ANY two SIR
        # instances compared equal. Compare the three compartments properly.
        return isinstance(other, SIR) and (
            self.susceptible == other.susceptible
            and self.infected == other.infected
            and self.removed == other.removed
        )

    # defines self += other (0 is accepted so sum() works)
    def __iadd__(self, other):
        if isinstance(other, SIR):
            self.susceptible += other.susceptible
            self.infected += other.infected
            self.removed += other.removed
            self.total_pop += other.total_pop
            return self
        elif other == 0:
            return self
        else:
            return NotImplemented

    # defines new = self + other
    def __add__(self, other):
        if isinstance(other, SIR):
            new = self.copy()
            return SIR.__iadd__(new, other)
        elif other == 0:
            return self.copy()
        else:
            return NotImplemented

    # defines new = other + self (lets sum() start from its default 0)
    def __radd__(self, other):
        if isinstance(other, SIR):
            return SIR.__add__(self, other)
        elif other == 0:
            return self.copy()
        else:
            return NotImplemented

    def replace(self, sir):
        """Overwrite this instance's counts with `sir`'s and recompute total."""
        self.susceptible = sir.susceptible
        self.infected = sir.infected
        self.removed = sir.removed
        self.total_pop = self.susceptible + self.infected + self.removed

    def inc_infected(self, infected):
        """Move `infected` people from susceptible to infected."""
        self.susceptible -= infected
        self.infected += infected

    def inc_removed(self, removed):
        """Move `removed` people from infected to removed."""
        self.infected -= removed
        self.removed += removed

    def as_tuple(self, total=False):
        """Return (S, I, R), or (S, I, R, total_pop) when total=True."""
        if total:
            return (
                self.susceptible,
                self.infected,
                self.removed,
                self.total_pop
            )
        else:
            return (self.susceptible, self.infected, self.removed)

    def transfer_to(self, add_s, add_i, add_r):
        """
        Transfers people into this region.

        Parameters
        ----------
        add_s (Int) : wanted increase of susceptible
        add_i (Int) : wanted increase of infected
        add_r (Int) : wanted increase of removed

        Returns
        -------
        None : Mutates the SIR object

        Raises
        ------
        Exception : if the resulting total population is negative
        """
        self.susceptible += add_s
        self.infected += add_i
        self.removed += add_r
        self.total_pop += add_s + add_i + add_r
        if self.total_pop < 0:
            raise Exception("total population < 0 after transferring to region")

    def transfer_from(self, rem_s, rem_i, rem_r):
        """
        Transfers people out of this region.

        Parameters
        ----------
        rem_s (Int) : wanted removal of susceptible
        rem_i (Int) : wanted removal of infected
        rem_r (Int) : wanted removal of removed

        Returns
        -------
        None : Mutates the SIR object

        Raises
        ------
        Exception : if the resulting total population is negative
        """
        self.susceptible -= rem_s
        self.infected -= rem_i
        self.removed -= rem_r
        self.total_pop -= rem_s + rem_i + rem_r
        if self.total_pop < 0:
            raise Exception("total population < 0 after transferring from region")
class Sir:
    """Compartmental Susceptible-Infected-Removed population state for one region.

    Mangled duplicate of SIR, repaired to be self-consistent: all internal
    references now use `Sir` / `Exception` (the original raised NameError
    from `sir(...)` in copy() and `exception(...)` in the transfer methods).
    """

    def __init__(self, susceptible, infected, removed, region_id=None):
        self.region_id = region_id
        self.susceptible = susceptible
        self.infected = infected
        self.removed = removed
        # Cached total; kept in sync by the mutating methods below.
        self.total_pop = susceptible + infected + removed

    def copy(self):
        # BUG FIX: original called undefined lowercase `sir(...)`.
        # NOTE(review): region_id is not carried over — confirm this is desired.
        return Sir(self.susceptible, self.infected, self.removed)

    def __str__(self):
        return 'SIR(S={susceptible}, I={infected}, R={removed})'.format(
            susceptible=self.susceptible,
            infected=self.infected,
            removed=self.removed,
        )

    # defines self == other
    def __eq__(self, other):
        # BUG FIX: the original returned a non-empty tuple of comparisons,
        # which is always truthy, so any two instances compared equal.
        # Also compares against Sir (not the sibling SIR class).
        return (
            isinstance(other, Sir)
            and self.susceptible == other.susceptible
            and self.infected == other.infected
            and self.removed == other.removed
        )

    # defines self += other
    def __iadd__(self, other):
        """Compartment-wise in-place addition; the int 0 acts as identity
        so that sum() over Sir instances works."""
        if isinstance(other, Sir):
            self.susceptible += other.susceptible
            self.infected += other.infected
            self.removed += other.removed
            self.total_pop += other.total_pop
            return self
        elif other == 0:
            return self
        else:
            return NotImplemented

    # defines new = self + other
    def __add__(self, other):
        if isinstance(other, Sir):
            new = self.copy()
            return Sir.__iadd__(new, other)
        elif other == 0:
            return self.copy()
        else:
            return NotImplemented

    # defines new = other + self (needed because sum() starts from int 0)
    def __radd__(self, other):
        if isinstance(other, Sir):
            return Sir.__add__(self, other)
        elif other == 0:
            return self.copy()
        else:
            return NotImplemented

    def replace(self, sir):
        """Overwrite this state's compartments with those of *sir* and
        recompute the cached total."""
        self.susceptible = sir.susceptible
        self.infected = sir.infected
        self.removed = sir.removed
        self.total_pop = self.susceptible + self.infected + self.removed

    def inc_infected(self, infected):
        """Move *infected* people from the susceptible to the infected pool."""
        self.susceptible -= infected
        self.infected += infected

    def inc_removed(self, removed):
        """Move *removed* people from the infected to the removed pool."""
        self.infected -= removed
        self.removed += removed

    def as_tuple(self, total=False):
        """Return (S, I, R), or (S, I, R, total_pop) when total=True."""
        if total:
            return (
                self.susceptible,
                self.infected,
                self.removed,
                self.total_pop,
            )
        return (self.susceptible, self.infected, self.removed)

    def transfer_to(self, add_s, add_i, add_r):
        """
        transfers people to sir object

        Parameters
        ----------
        add_s (Int) : wanted increase of susceptible
        add_i (Int) : wanted increase of infected
        add_r (Int) : wanted increase of removed

        Returns
        -------
        None : Mutates the Sir object

        Raises
        ------
        Exception : if the resulting total population is negative
        """
        self.susceptible += add_s
        self.infected += add_i
        self.removed += add_r
        self.total_pop += add_s + add_i + add_r
        if self.total_pop < 0:
            # BUG FIX: original raised undefined lowercase `exception`.
            raise Exception('total population < 0 after transferring to region')

    def transfer_from(self, rem_s, rem_i, rem_r):
        """
        transfers people from sir object

        Parameters
        ----------
        rem_s (Int) : wanted removal of susceptible
        rem_i (Int) : wanted removal of infected
        rem_r (Int) : wanted removal of removed

        Returns
        -------
        None : Mutates the Sir object

        Raises
        ------
        Exception : if the resulting total population is negative
        """
        self.susceptible -= rem_s
        self.infected -= rem_i
        self.removed -= rem_r
        self.total_pop -= rem_s + rem_i + rem_r
        if self.total_pop < 0:
            # BUG FIX: original raised undefined lowercase `exception`.
            raise Exception('total population < 0 after transferring from region')
# Demo loop: a climbs by 3 and b drops by 2 until their gap is <= 4.
a, b = 10, 22
while b - a > 4:
    c = b - 2 * a
    # d is derived from c but otherwise unused past the loop body.
    d = c + 1 if c > 0 else c + 5
    a, b = a + 3, b - 2
# Demo loop: repeats while the gap between b and a exceeds 4.
a = 10
b = 22
while True:
    if b - a <= 4:
        break
    c = b - a - a  # same as b - 2*a
    if c > 0:
        d = c + 1
    else:
        d = c + 5
    a += 3
    b -= 2
_html = """<HTML><HEAD><?HEAD><BODY>{}</BODY><?HTML>""" def index_page(): content = """This is a simple api for exploring the <a href="http://www.geonames.org">geonames</a> <a href="http://download.geonames.org/export/dump">data</a>, specificially the cities1000 data, which is cities with population of greater than or equal to 1000 people. <p> The api is rooted at /cities endpoints include: <ul> <li>/cities/&ltgeonameid&gt</li> <li>/cities/&ltgeonameid&gt/nearest/&ltk&gt</li> <li>/cities?name=&ltname to search for&gt</li> <li>/cities?partial_name=&ltname fragment to search for&gt</li> </ul> </p> <p>Here are some examples to get you started: <ul> <li><a href="/cities/5391959">/cities/5391959</a> will take you to the local entry to San Francisco, CA, USA</li> <li><a href="/cities/5391959/nearest/3">/cities/5391959/nearest/3</a> will return a list of the three cities closest to San Francisco. Note, this will include San Francisco as the closest location to itself.</li> <li><a href="/cities?name=Oakland">/citie?name=Oakland</a> will produce a list of cities named Oakland</li> </ul> </p> """ return _html.format(content) def _city_desc(city, link_to_city_page=True): link_content = """ <ul> <li><em>Name</em>: {name}</li> <li><em>geonameid</em>: <a href="/cities/{geonameid}">{geonameid}</a></li> <li><em>Latitude</em>: {latitude}</li> <li><em>Longitude</em>: {longitude}</li> </ul>""" nolink_content = """ <ul> <li><em>Name</em>: {name}</li> <li><em>geonameid</em>: {geonameid}</li> <li><em>Latitude</em>: {latitude}</li> <li><em>Longitude</em>: {longitude}</li> </ul>""" if link_to_city_page: return link_content.format(**city) else: return nolink_content.format(**city) def city_desc_page(city): return _html.format(_city_desc(city, False)) def city_list_page(cities): content = """<ol><li>{}</li></ol>""" tmp = "</li><li>".join([_city_desc(cc) for cc in cities]) return _html.format(content.format(tmp)) def distances_page(distances): content = """<ol><li>{}</li></ol>""" _deet = 
"""<ul> <li><em>Distance:</em> {dist}</li> <li><em>geonameid:</em> <a href="/cities/{geonameid}">{geonameid}</a></li> </ul>""" tmp = "</li><li>".join([_deet.format(**dc) for dc in distances]) return _html.format(content.format(tmp))
_html = '<HTML><HEAD><?HEAD><BODY>{}</BODY><?HTML>' def index_page(): content = 'This is a simple api for exploring the <a href="http://www.geonames.org">geonames</a> <a href="http://download.geonames.org/export/dump">data</a>, specificially the cities1000 data, which is cities with population of greater than or equal to 1000 people.\n\n<p>\nThe api is rooted at /cities endpoints include:\n<ul>\n <li>/cities/&ltgeonameid&gt</li>\n <li>/cities/&ltgeonameid&gt/nearest/&ltk&gt</li>\n <li>/cities?name=&ltname to search for&gt</li>\n <li>/cities?partial_name=&ltname fragment to search for&gt</li>\n</ul>\n</p>\n<p>Here are some examples to get you started:\n<ul>\n <li><a href="/cities/5391959">/cities/5391959</a> will take you to the local entry to San Francisco, CA, USA</li>\n <li><a href="/cities/5391959/nearest/3">/cities/5391959/nearest/3</a> will return a list of the three cities closest to San Francisco. Note, this will include San Francisco as the closest location to itself.</li>\n <li><a href="/cities?name=Oakland">/citie?name=Oakland</a> will produce a list of cities named Oakland</li>\n</ul>\n</p>\n\n' return _html.format(content) def _city_desc(city, link_to_city_page=True): link_content = '\n<ul>\n <li><em>Name</em>: {name}</li>\n <li><em>geonameid</em>: <a href="/cities/{geonameid}">{geonameid}</a></li>\n <li><em>Latitude</em>: {latitude}</li>\n <li><em>Longitude</em>: {longitude}</li>\n</ul>' nolink_content = '\n<ul>\n <li><em>Name</em>: {name}</li>\n <li><em>geonameid</em>: {geonameid}</li>\n <li><em>Latitude</em>: {latitude}</li>\n <li><em>Longitude</em>: {longitude}</li>\n</ul>' if link_to_city_page: return link_content.format(**city) else: return nolink_content.format(**city) def city_desc_page(city): return _html.format(_city_desc(city, False)) def city_list_page(cities): content = '<ol><li>{}</li></ol>' tmp = '</li><li>'.join([_city_desc(cc) for cc in cities]) return _html.format(content.format(tmp)) def distances_page(distances): content = 
'<ol><li>{}</li></ol>' _deet = '<ul>\n<li><em>Distance:</em> {dist}</li>\n<li><em>geonameid:</em> <a href="/cities/{geonameid}">{geonameid}</a></li>\n</ul>' tmp = '</li><li>'.join([_deet.format(**dc) for dc in distances]) return _html.format(content.format(tmp))