content
stringlengths
7
1.05M
# Demonstrates Python's late (runtime) name binding: referencing B inside a
# function body is legal at definition time, but any *call* executed before
# `class B` is defined raises NameError.
class A:
    def f(self):
        # B is looked up when f() is called, not when it is defined.
        return B()

a = A()
a.f()  # NameError: name 'B' is not defined

b = B()  # NameError: name 'B' is not defined

def f():
    b = B()

f()  # NameError: name 'B' is not defined

class B:
    pass
'''Challenge 051 redo: read the first term and the common difference of an
arithmetic progression (PA) and print its first 10 terms using a while loop.'''

# ANSI color palette for terminal output.
cores = {'azul': '\033[1;34m', 'vermelho': '\033[1;31m', 'limpa': '\033[m'}


def termos_pa(primeiro, razao, quantidade=10):
    """Return the first *quantidade* terms of the PA starting at *primeiro*
    with common difference *razao*.

    Extracted so the arithmetic is testable apart from the console I/O.
    """
    termos = []
    termo = primeiro
    contador = 0
    while contador < quantidade:
        termos.append(termo)
        termo += razao
        contador += 1
    return termos


if __name__ == '__main__':
    # Bug fix: the original format string '{:^30}{}{}' centered the ANSI
    # escape code instead of the title text; the color codes now wrap the
    # centered title.
    print('{}{:^30}{}'.format(cores['azul'], ' Gerador de PA 10 termos', cores['limpa']))
    print('')
    r = int(input('Informe a razão da PA: '))
    n = int(input('Informe o primeiro termo: '))
    print('')
    print('')
    # Same output shape as the original loop: terms joined by ' --> ',
    # then ' FIM!' after the tenth term.
    print(' --> '.join(str(t) for t in termos_pa(n, r)), end='')
    print(' FIM!')
    # Challenge complete.
def reverse_list(items):
    """Reverse *items* in place and return the same list object."""
    # Slice assignment rewrites the contents of the existing list, so any
    # caller holding a reference to *items* sees the reversal — exactly the
    # observable effect of the two-pointer swap approach.
    items[:] = items[::-1]
    return items


if __name__ == '__main__':
    sample = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    print(reverse_list(sample))
# Develop a multiplication-table generator for any integer between 1 and 10.
# The user picks the number; the output must follow this spec:
#   Tabuada de 5:
#   5 X 1 = 5
#   5 X 2 = 10
#   ...
#   5 X 10 = 50


def tabuada(numero):
    """Return the ten lines of *numero*'s multiplication table.

    Bug fix: the original printed '5 * 1 = 5' although the spec in the
    header comment requires the 'X' separator ('5 X 1 = 5').
    """
    return [f"{numero} X {valor} = {numero * valor}" for valor in range(1, 11)]


if __name__ == '__main__':
    numero = int(input("Informe o numero para saber a tabuada: "))
    # Re-prompt until the number is inside the supported 1..10 range.
    while numero <= 0 or numero > 10:
        numero = int(input("Informe o numero para saber a tabuada: "))
    print("TABUADA")
    for linha in tabuada(numero):
        print(linha)
# Iterator pattern — a minimal bidirectional iterator framework.
class BaseIterator:
    """Bidirectional cursor over any indexable sequence.

    The cursor starts just before the first element; next()/previous()
    move it and report whether it now rests on a valid element.
    """

    def __init__(self, data):
        self.__seq = data
        self.toBegin()

    def toBegin(self):
        """Reset the cursor to just before the first element."""
        self.__cursor = -1

    def toEnd(self):
        """Place the cursor just past the last element."""
        self.__cursor = len(self.__seq)

    def next(self):
        """Advance the cursor; return True if it moved."""
        if self.__cursor >= len(self.__seq) - 1:
            return False
        self.__cursor += 1
        return True

    def previous(self):
        """Step the cursor back; return True if it moved."""
        if self.__cursor <= 0:
            return False
        self.__cursor -= 1
        return True

    def current(self):
        """Return the element under the cursor, or None when out of range."""
        if 0 <= self.__cursor < len(self.__seq):
            return self.__seq[self.__cursor]
        return None


def testBaseIterator():
    print("从前往后遍历:")
    walker = BaseIterator(range(0, 10))
    while walker.next():
        print(walker.current(), end='\t')
    print()
    print("从后往前遍历")
    walker.toEnd()
    while walker.previous():
        print(walker.current(), end='\t')


if __name__ == "__main__":
    testBaseIterator()

"""
从前往后遍历:
0	1	2	3	4	5	6	7	8	9
从后往前遍历
9	8	7	6	5	4	3	2	1	0
"""
#!/usr/bin/python3 # # MAGIC_SEED = 1956 #PATH TRAIN_PATH = 'train' PREDICT_PATH = 'predict' # Dataset properties CSV_PATH="C:\\Users\\dmitr_000\\.keras\\datasets\\Imbalance_data.csv" # Header names DT_DSET ="Date Time" RCPOWER_DSET= "Imbalance" DISCRET =10 # The time cutoffs for the formation of the validation and test sequence in the format of the parameter passed # to the timedelta() like as 'days=<value>' or 'hours=<value>' or 'minutes=<value>' # TEST_CUT_OFF = 60 # 'hours=1' VAL_CUT_OFF = 360 # 'hours=6' 'days=1' # Log files LOG_FILE_NAME="Imbalance" #training model EPOCHS=10 N_STEPS = 32 N_FEATURES = 1 #LSTM models LSTM_POSSIBLE_TYPES={'LSTM':(0,"Vanilla_LSTM"), 'stacked LSTM':(1,"Stacked_LSTM") ,\ 'Bidirectional LSTM':(2,"B_dir_LSTM"),'CNN LSTM':(3,"CNN_LSTM")} LSTM_TYPE='LSTM' UNITS =32 #CNN models FILTERS = 64 KERNEL_SIZE = 2 POOL_SIZE = 2 FOLDER_PATH_SAVED_CNN_MODEL="ConvNN" #MLP model HIDDEN_NEYRONS = 16 DROPOUT = 0.2 FOLDER_PATH_SAVED_MLP_MODEL="MLP" # Chartin. Matplotlib.pyplot is used for charting STOP_ON_CHART_SHOW=False # simple class for logging class _loging(): pass
# Read three integers from the user and report the largest and the smallest.


def maior_e_menor(a, b, c):
    """Return a (maior, menor) tuple — the max and min of the three values."""
    return max(a, b, c), min(a, b, c)


if __name__ == '__main__':
    # Getting the numbers from the user.
    num1 = int(input('Digite um valor: '))
    num2 = int(input('Digite outro valor: '))
    num3 = int(input('Digite ultimo valor: '))
    # Bug fix: the original used three *independent* if-blocks, so on ties
    # (e.g. all values equal) the "maior" line was printed up to three
    # times. Computing max/min once reports each exactly once.
    maior, menor = maior_e_menor(num1, num2, num3)
    print('O maior número é {}'.format(maior))
    print('O menor número é {}'.format(menor))
def unsupervised_distr(distr):
    """Return (renamed_distr, mapping) with every variable except 'z'
    suffixed '_u'.

    *distr* must expose ``var`` and ``cond_var`` iterables plus a
    ``replace_var(**mapping)`` method (e.g. a pixyz-style distribution —
    TODO confirm the concrete type against callers).
    """
    variables = {k: k + '_u' for k in distr.var + distr.cond_var if k != 'z'}
    return distr.replace_var(**variables), variables


def unsupervised_distr_no_var(distr):
    """Like unsupervised_distr() but return only the renamed distribution.

    Delegates to unsupervised_distr() instead of duplicating the renaming
    logic (the original repeated it verbatim).
    """
    distr_unsupervised, _ = unsupervised_distr(distr)
    return distr_unsupervised
class Solution:
    # Morse codes for 'a'..'z', indexed by letter offset from 'a'.
    _MORSE = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
              ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
              "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."]

    def uniqueMorseRepresentations(self, words):
        """Return how many distinct Morse transcriptions *words* produce.

        Each word (lowercase a-z) is transliterated letter-by-letter and
        the transcriptions are deduplicated with a set.

        Bug fix: the original annotated the signature with ``List[str]``
        without importing ``typing.List``, which raises NameError as soon
        as the class body executes.
        """
        offset = ord('a')
        transcriptions = {
            ''.join(self._MORSE[ord(ch) - offset] for ch in word)
            for word in words
        }
        return len(transcriptions)
#!/bin/python3
"""
Recommender algorithm

Weighted sum of similarity vector
"""

# Default weight for each similarity-vector component.
WEIGHTS = [1, 1, 1, 1]

# rank, bgg_url, game_id, names, min_players, max_players, avg_time, min_time,
# max_time, year, avg_rating, geek_rating, num_votes, image_url, age, mechanic,
# owned, category, designer, weight


def recommend_similar(data, row, vectors, names, weights=None, count=20):
    """Recommend similar items to the one provided as *row*.

    *vectors* maps game_id -> similarity vector
    v(geek_rating, category, mechanic, designer, weight); *weights* is the
    ordered list of per-component weights (defaults to WEIGHTS).
    Returns up to *count* rank-lists, best score first.

    Bug fix: *count* was accepted but ignored — the cut-off was hard-coded
    to 20 inside the sorting step; it is now forwarded.
    (*names* is kept for interface compatibility; it is unused here.)
    """
    if weights is None:
        weights = WEIGHTS
    coeffs = {}
    for key, vector in vectors.items():
        # Skip the query item itself.
        if key == row.game_id:
            continue
        score = weighted_sum(vector, weights)
        # Group ids sharing the same score so ties stay together.
        coeffs.setdefault(score, []).append(key)
    return sort_dict_values(data, coeffs, count)


def weighted_sum(vector, weights):
    """Return the dot product of *vector* with *weights*."""
    return sum(i * j for i, j in zip(vector, weights))


def sort_dict_values(data, coeffs, count=20):
    """Return up to *count* lists of 'rank' values, ordered by descending score.

    Bug fixes vs. the original: the removed/deprecated ``DataFrame.ix``
    accessor is replaced with ``.loc``, the unused zip over ``coeffs.items()``
    is dropped, and the cut-off is the *count* parameter instead of a
    hard-coded 20.
    """
    srted = []
    for score in sorted(coeffs.keys(), reverse=True):
        ranks = [data.loc[data['game_id'] == item, 'rank'].values[0]
                 for item in coeffs[score]]
        srted.append(ranks)
        if len(srted) >= count:
            break
    return srted
# Expected parsed output for a "show ip bgp ... neighbors" style parser test:
# a single IBGP route-reflector-client neighbor (192.168.197.254) in the
# default VRF, with per-address-family (ipv4 unicast / l2vpn vpls /
# vpnv4 unicast) prefix and refresh counters, negotiated capabilities,
# keepalive timers, message counters, and session transport details.
# Pure fixture data — values mirror device output and carry no logic.
expected_output = { "list_of_neighbors": ["192.168.197.254"], "vrf": { "default": { "neighbor": { "192.168.197.254": { "address_family": { "ipv4 unicast": { "advertise_bit": 0, "bgp_table_version": 1, "dynamic_slow_peer_recovered": "never", "index": 0, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "never", "last_received_refresh_start_of_rib": "never", "last_sent_refresh_end_of_rib": "1w5d", "last_sent_refresh_start_of_rib": "1w5d", "local_policy_denied_prefixes_counters": { "inbound": {"total": 0}, "outbound": {"total": 0}, }, "max_nlri": 0, "min_nlri": 0, "neighbor_version": "1/0", "output_queue_size": 0, "prefix_activity_counters": { "received": { "explicit_withdraw": 0, "implicit_withdraw": 0, "prefixes_current": 0, "prefixes_total": 0, "used_as_bestpath": 0, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 0, "implicit_withdraw": 0, "prefixes_current": 0, "prefixes_total": 0, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 0, "refresh_start_of_rib": 0, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 1, "refresh_out": 0, "slow_peer_detection": False, "slow_peer_split_update_group_dynamic": False, }, "l2vpn vpls": { "advertise_bit": 1, "bgp_table_version": 9431, "community_attribute_sent": True, "dynamic_slow_peer_recovered": "never", "extended_community_attribute_sent": True, "index": 38, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "02:01:32", "last_received_refresh_start_of_rib": "02:01:36", "last_sent_refresh_end_of_rib": "02:41:38", "last_sent_refresh_start_of_rib": "02:41:38", "local_policy_denied_prefixes_counters": { "inbound": { "bestpath_from_this_peer": "n/a", "total": 0, }, "outbound": { "bestpath_from_this_peer": 402, "total": 402, }, }, "max_nlri": 199, "min_nlri": 0, "neighbor_version": "9431/0", "output_queue_size": 0, "prefix_activity_counters": {
"received": { "explicit_withdraw": 0, "implicit_withdraw": 402, "prefixes_total": 603, "used_as_bestpath": 201, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 307, "implicit_withdraw": 5646, "prefixes_total": 6356, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 2, "refresh_start_of_rib": 2, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 3, "refresh_in": 4, "refresh_out": 0, "route_reflector_client": True, "slow_peer_detection": False, "slow_peer_split_update_group_dynamic": False, "suppress_ldp_signaling": True, "update_group_member": 38, }, "vpnv4 unicast": { "advertise_bit": 2, "bgp_table_version": 29454374, "dynamic_slow_peer_recovered": "never", "extended_community_attribute_sent": True, "index": 44, "last_detected_dynamic_slow_peer": "never", "last_received_refresh_end_of_rib": "02:01:32", "last_received_refresh_start_of_rib": "02:01:36", "last_sent_refresh_end_of_rib": "02:41:11", "last_sent_refresh_start_of_rib": "02:41:38", "local_policy_denied_prefixes_counters": { "inbound": { "bestpath_from_this_peer": "n/a", "total": 0, }, "outbound": { "bestpath_from_this_peer": 3100, "total": 3100, }, }, "max_nlri": 270, "min_nlri": 0, "neighbor_version": "29454374/0", "output_queue_size": 0, "prefix_activity_counters": { "received": { "explicit_withdraw": 206, "implicit_withdraw": 40708, "prefixes_total": 61115, "used_as_bestpath": 20201, "used_as_multipath": 0, }, "sent": { "explicit_withdraw": 1131817, "implicit_withdraw": 64677991, "prefixes_total": 68207251, "used_as_bestpath": "n/a", "used_as_multipath": "n/a", }, }, "refresh_activity_counters": { "received": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, "sent": { "refresh_end_of_rib": 1, "refresh_start_of_rib": 1, }, }, "refresh_epoch": 2, "refresh_in": 4, "refresh_out": 27, "route_reflector_client": True, "slow_peer_detection": False,
"slow_peer_split_update_group_dynamic": False, "update_group_member": 44, }, }, "bgp_neighbor_session": {"sessions": 1}, "bgp_negotiated_capabilities": { "enhanced_refresh": "advertised and received", "four_octets_asn": "advertised and received", "graceful_restart": "advertised and received", "graceful_restart_af_advertised_by_peer": [ "vpnv4 unicast", "l2vpn vpls", ], "ipv4_unicast": "advertised", "l2vpn_vpls": "advertised and received", "multisession": "advertised", "remote_restart_timer": 120, "route_refresh": "advertised and received(new)", "stateful_switchover": "NO for session 1", "vpnv4_unicast": "advertised and received", }, "bgp_negotiated_keepalive_timers": { "hold_time": 90, "keepalive_interval": 30, "min_holdtime": 0, }, "bgp_neighbor_counters": { "messages": { "in_queue_depth": 0, "out_queue_depth": 0, "received": { "keepalives": 346, "notifications": 0, "opens": 1, "route_refresh": 0, "total": 13183, "updates": 12830, }, "sent": { "keepalives": 347, "notifications": 0, "opens": 1, "route_refresh": 4, "total": 12180, "updates": 11824, }, } }, "bgp_session_transport": { "address_tracking_status": "enabled", "connection": { "dropped": 38, "established": 39, "last_reset": "02:42:06", "reset_reason": "Peer closed the session", }, "gr_restart_time": 120, "gr_stalepath_time": 360, "graceful_restart": "enabled", "min_time_between_advertisement_runs": 0, "rib_route_ip": "192.168.197.254", "sso": False, "tcp_connection": False, "tcp_path_mtu_discovery": "enabled", }, "bgp_version": 4, "link": "internal", "remote_as": 5918, "router_id": "192.168.197.254", "session_state": "Idle", "shutdown": False, } } } }, }
# Ask for a city name and tell whether it starts with "santo"/"santos".


def comeca_com_santo(nome):
    """Return True if the first word of *nome* starts with 'santo'
    (case-insensitive, surrounding whitespace ignored).

    Bug fixes vs. the original: the membership test ('santo' in first_word)
    matched the substring anywhere in the word (e.g. 'Monsanto'), which
    contradicts the "começa com" (starts with) question, and empty input
    crashed with IndexError.
    """
    palavras = nome.lower().strip().split()
    return bool(palavras) and palavras[0].startswith('santo')


if __name__ == '__main__':
    # Typo fix in the prompt: 'Dogite' -> 'Digite'.
    nome = str(input('Digite o nome de sua cidade: '))
    print('Sua cidade começa com santos?')
    print(comeca_com_santo(nome))
# Competitive-programming script: for each of Q queries x, count the
# subarrays of A whose sum is at most x, via the sliding-window
# (two pointers) technique.
# Input format (stdin):
#   N Q
#   A_1 ... A_N
#   X_1 ... X_Q
N, Q = map(int, input().split())
A = list(map(int, input().split()))
X = list(map(int, input().split()))


def two_pointers(x):
    """Count pairs (left, right) with sum(A[left..right]) <= x.

    NOTE(review): the window-shrink step is only valid when the elements
    of A are non-negative — TODO confirm from the problem statement.
    """
    left = 0
    sm = 0   # running sum of the current window A[left..right]
    ans = 0
    for right in range(N):
        sm += A[right]
        # Shrink the window from the left until its sum fits under x.
        while(sm > x):
            sm -= A[left]
            left += 1
        ans += (right-left+1)  # number of valid left endpoints for this right
    return ans


for x in X:
    print(two_pointers(x))
class RepositoryTests(TestCase):
    """Unit tests for Repository operations."""

    fixtures = ["test_scmtools"]

    def setUp(self):
        super(RepositoryTests, self).setUp()
        self.local_repo_path = os.path.join(os.path.dirname(__file__), "..", "testdata", "git_repo")
        self.repository = Repository.objects.create(name="Git test repo", path=self.local_repo_path, tool=Tool.objects.get(name="Git"))
        self.scmtool_cls = self.repository.get_scmtool().__class__
        # Remember the real SCMTool methods so tests can monkey-patch them
        # and tearDown can restore the originals.
        self.old_get_file = self.scmtool_cls.get_file
        self.old_file_exists = self.scmtool_cls.file_exists

    def tearDown(self):
        super(RepositoryTests, self).tearDown()
        cache.clear()
        # Undo any monkey-patching done by individual tests.
        self.scmtool_cls.get_file = self.old_get_file
        self.scmtool_cls.file_exists = self.old_file_exists

    def test_archive(self):
        """Testing Repository.archive"""
        self.repository.archive()
        self.assertTrue(self.repository.name.startswith("ar:Git test repo:"))
        self.assertTrue(self.repository.archived)
        self.assertFalse(self.repository.public)
        self.assertIsNotNone(self.repository.archived_timestamp)
        # Re-fetch from the database: archive() saved, so values must match.
        repository = Repository.objects.get(pk=self.repository.pk)
        self.assertEqual(repository.name, self.repository.name)
        self.assertEqual(repository.archived, self.repository.archived)
        self.assertEqual(repository.public, self.repository.public)
        self.assertEqual(repository.archived_timestamp, self.repository.archived_timestamp)

    def test_archive_no_save(self):
        """Testing Repository.archive with save=False"""
        self.repository.archive(save=False)
        self.assertTrue(self.repository.name.startswith("ar:Git test repo:"))
        self.assertTrue(self.repository.archived)
        self.assertFalse(self.repository.public)
        self.assertIsNotNone(self.repository.archived_timestamp)
        # With save=False the database row must be unchanged, so every
        # stored field differs from the in-memory (archived) instance.
        repository = Repository.objects.get(pk=self.repository.pk)
        self.assertNotEqual(repository.name, self.repository.name)
        self.assertNotEqual(repository.archived, self.repository.archived)
        self.assertNotEqual(repository.public, self.repository.public)
        self.assertNotEqual(repository.archived_timestamp, self.repository.archived_timestamp)

    def test_clean_without_conflict(self):
        """Testing Repository.clean without name/path conflicts"""
        with self.assertNumQueries(1):
            self.repository.clean()

    def test_clean_with_name_conflict(self):
        """Testing Repository.clean with name conflict"""
        repository = Repository(name=self.repository.name, path="path/to/repo.git", tool=self.repository.tool)
        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()
        self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"]})

    def test_clean_with_path_conflict(self):
        """Testing Repository.clean with path conflict"""
        repository = Repository(name="New test repo", path=self.repository.path, tool=self.repository.tool)
        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()
        self.assertEqual(ctx.exception.message_dict, {"path": ["A repository with this path already exists"]})

    def test_clean_with_name_and_path_conflict(self):
        """Testing Repository.clean with name and path conflict"""
        repository = Repository(name=self.repository.name, path=self.repository.path, tool=self.repository.tool)
        with self.assertRaises(ValidationError) as ctx:
            with self.assertNumQueries(1):
                repository.clean()
        self.assertEqual(ctx.exception.message_dict, {"name": ["A repository with this name already exists"], "path": ["A repository with this path already exists"]})

    def test_clean_with_path_conflict_with_archived(self):
        """Testing Repository.clean with archived repositories ignored for
        path conflict
        """
        self.repository.archive()
        repository = Repository(name="New test repo", path=self.repository.path, tool=self.repository.tool)
        with self.assertNumQueries(1):
            repository.clean()

    def test_get_file_caching(self):
        """Testing Repository.get_file caches result"""
        def get_file(self, path, revision, **kwargs):
            num_calls["get_file"] += 1
            return b"file data"

        num_calls = {"get_file": 0}
        path = "readme"
        revision = "e965047"
        request = {}
        # Patch the SCMTool so we can count backend fetches.
        self.scmtool_cls.get_file = get_file
        data1 = self.repository.get_file(path, revision, request=request)
        data2 = self.repository.get_file(path, revision, request=request)
        self.assertIsInstance(data1, bytes)
        self.assertIsInstance(data2, bytes)
        self.assertEqual(data1, b"file data")
        self.assertEqual(data1, data2)
        # Second call must have been served from cache.
        self.assertEqual(num_calls["get_file"], 1)

    def test_get_file_signals(self):
        """Testing Repository.get_file emits signals"""
        def on_fetching_file(sender, path, revision, request, **kwargs):
            found_signals.append(("fetching_file", path, revision, request))

        def on_fetched_file(sender, path, revision, request, **kwargs):
            found_signals.append(("fetched_file", path, revision, request))

        found_signals = []
        fetching_file.connect(on_fetching_file, sender=self.repository)
        fetched_file.connect(on_fetched_file, sender=self.repository)
        path = "readme"
        revision = "e965047"
        request = {}
        self.repository.get_file(path, revision, request=request)
        self.assertEqual(len(found_signals), 2)
        self.assertEqual(found_signals[0], ("fetching_file", path, revision, request))
        self.assertEqual(found_signals[1], ("fetched_file", path, revision, request))

    def test_get_file_exists_caching_when_exists(self):
        """Testing Repository.get_file_exists caches result when exists"""
        def file_exists(self, path, revision, **kwargs):
            num_calls["get_file_exists"] += 1
            return True

        num_calls = {"get_file_exists": 0}
        path = "readme"
        revision = "e965047"
        request = {}
        self.scmtool_cls.file_exists = file_exists
        exists1 = self.repository.get_file_exists(path, revision, request=request)
        exists2 = self.repository.get_file_exists(path, revision, request=request)
        self.assertTrue(exists1)
        self.assertTrue(exists2)
        # Positive results are cached; only one backend call expected.
        self.assertEqual(num_calls["get_file_exists"], 1)

    def test_get_file_exists_caching_when_not_exists(self):
        """Testing Repository.get_file_exists doesn't cache result when the
        file does not exist
        """
        def file_exists(self, path, revision, **kwargs):
            num_calls["get_file_exists"] += 1
            return False

        num_calls = {"get_file_exists": 0}
        path = "readme"
        revision = "12345"
        request = {}
        self.scmtool_cls.file_exists = file_exists
        exists1 = self.repository.get_file_exists(path, revision, request=request)
        exists2 = self.repository.get_file_exists(path, revision, request=request)
        self.assertFalse(exists1)
        self.assertFalse(exists2)
        # Negative results are not cached; both calls hit the backend.
        self.assertEqual(num_calls["get_file_exists"], 2)

    def test_get_file_exists_caching_with_fetched_file(self):
        """Testing Repository.get_file_exists uses get_file's cached result"""
        def get_file(self, path, revision, **kwargs):
            num_calls["get_file"] += 1
            return b"file data"

        def file_exists(self, path, revision, **kwargs):
            num_calls["get_file_exists"] += 1
            return True

        num_calls = {"get_file_exists": 0, "get_file": 0}
        path = "readme"
        revision = "e965047"
        request = {}
        self.scmtool_cls.get_file = get_file
        self.scmtool_cls.file_exists = file_exists
        self.repository.get_file(path, revision, request=request)
        exists1 = self.repository.get_file_exists(path, revision, request=request)
        exists2 = self.repository.get_file_exists(path, revision, request=request)
        self.assertTrue(exists1)
        self.assertTrue(exists2)
        # The prior get_file() populated the cache, so file_exists is never
        # consulted.
        self.assertEqual(num_calls["get_file"], 1)
        self.assertEqual(num_calls["get_file_exists"], 0)

    def test_get_file_exists_signals(self):
        """Testing Repository.get_file_exists emits signals"""
        def on_checking(sender, path, revision, request, **kwargs):
            found_signals.append(("checking_file_exists", path, revision, request))

        def on_checked(sender, path, revision, request, **kwargs):
            found_signals.append(("checked_file_exists", path, revision, request))

        found_signals = []
        checking_file_exists.connect(on_checking, sender=self.repository)
        checked_file_exists.connect(on_checked, sender=self.repository)
        path = "readme"
        revision = "e965047"
        request = {}
        self.repository.get_file_exists(path, revision, request=request)
        self.assertEqual(len(found_signals), 2)
        self.assertEqual(found_signals[0], ("checking_file_exists", path, revision, request))
        self.assertEqual(found_signals[1], ("checked_file_exists", path, revision, request))

    def test_repository_name_with_255_characters(self):
        """Testing Repository.name with 255 characters"""
        self.repository = Repository.objects.create(name="t" * 255, path=self.local_repo_path, tool=Tool.objects.get(name="Git"))
        self.assertEqual(len(self.repository.name), 255)

    def test_is_accessible_by_with_public(self):
        """Testing Repository.is_accessible_by with public repository"""
        user = self.create_user()
        repository = self.create_repository()
        self.assertTrue(repository.is_accessible_by(user))
        self.assertTrue(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_public_and_hidden(self):
        """Testing Repository.is_accessible_by with public hidden repository"""
        user = self.create_user()
        repository = self.create_repository(visible=False)
        self.assertTrue(repository.is_accessible_by(user))
        self.assertTrue(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_private_and_not_member(self):
        """Testing Repository.is_accessible_by with private repository and
        user not a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False)
        self.assertFalse(repository.is_accessible_by(user))
        self.assertFalse(repository.is_accessible_by(AnonymousUser()))

    def test_is_accessible_by_with_private_and_member(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False)
        repository.users.add(user)
        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_and_member_by_group(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a member by group
        """
        user = self.create_user()
        group = self.create_review_group(invite_only=True)
        group.users.add(user)
        repository = self.create_repository(public=False)
        repository.review_groups.add(group)
        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_and_superuser(self):
        """Testing Repository.is_accessible_by with private repository and
        user is a superuser
        """
        user = self.create_user(is_superuser=True)
        repository = self.create_repository(public=False)
        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_not_member(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user not a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False, visible=False)
        self.assertFalse(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_member(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user is a member
        """
        user = self.create_user()
        repository = self.create_repository(public=False, visible=False)
        repository.users.add(user)
        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_member_by_group(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and user is a member
        """
        user = self.create_user()
        group = self.create_review_group(invite_only=True)
        group.users.add(user)
        repository = self.create_repository(public=False, visible=False)
        repository.review_groups.add(group)
        self.assertTrue(repository.is_accessible_by(user))

    def test_is_accessible_by_with_private_hidden_and_superuser(self):
        """Testing Repository.is_accessible_by with private hidden
        repository and superuser
        """
        user = self.create_user(is_superuser=True)
        repository = self.create_repository(public=False, visible=False)
        self.assertTrue(repository.is_accessible_by(user))

    @add_fixtures(["test_users", "test_site"])
    def test_is_accessible_by_with_local_site_accessible(self):
        """Testing Repository.is_accessible_by with Local Site accessible
        by user
        """
        user = self.create_user()
        repository = self.create_repository(with_local_site=True)
        repository.local_site.users.add(user)
        self.assertTrue(repository.is_accessible_by(user))

    @add_fixtures(["test_users", "test_site"])
    def test_is_accessible_by_with_local_site_not_accessible(self):
        """Testing Repository.is_accessible_by with Local Site not
        accessible by user
        """
        user = self.create_user()
        repository = self.create_repository(with_local_site=True)
        self.assertFalse(repository.is_accessible_by(user))
        self.assertFalse(repository.is_accessible_by(AnonymousUser()))
def check_paranthesis(inp):
    """Return 0 if the brackets in *inp* are balanced, otherwise the
    1-based position of the first offending character.

    Bug fix vs. the original: a string ending with unmatched openers
    (e.g. "{{") fell off the loop and was reported as balanced (0);
    it now returns the position of the first unmatched opener.
    """
    closers = {')': '(', ']': '[', '}': '{'}
    stack = []  # (opening char, 1-based position) of unmatched openers
    for pos, ch in enumerate(inp, start=1):
        if ch in '([{':
            stack.append((ch, pos))
        elif not stack or stack[-1][0] != closers.get(ch):
            # Closing (or stray) character with no matching opener on top.
            return pos
        else:
            stack.pop()
    # Leftover openers mean the string is unbalanced as well.
    return stack[0][1] if stack else 0


if __name__ == "__main__":
    inp = input()
    print(check_paranthesis(inp))

'''
Given a string containing {, }, [, ], ( and ), check whether the
parentheses are balanced. Print 0 when balanced, otherwise the
index+1 of the position where the error occurs.

input
1. {([])}[]
2. {{[]}}}

output
1. 0
2. 7
'''
# Bazel macro that instantiates a native cc_test rule for an S2 test.
def s2test(name, deps = [], size = "small"):
    """Declares a cc_test named *name* built from "<name>.cc".

    Args:
      name: test target name; also selects the "<name>.cc" source file.
      deps: extra dependencies, appended after the implicit ":s2testing".
      size: standard Bazel test size attribute.
    """
    native.cc_test(
        name = name,
        srcs = ["%s.cc" % (name)],
        copts = [
            "-Iexternal/gtest/include",
            "-DS2_TEST_DEGENERACIES",
            "-DS2_USE_GFLAGS",
            "-DS2_USE_GLOG",
            "-DHASH_NAMESPACE=std",
            "-Wno-deprecated-declarations",
            "-Wno-format",
            "-Wno-non-virtual-dtor",
            "-Wno-parentheses",
            "-Wno-sign-compare",
            "-Wno-strict-aliasing",
            # Bug fix: "-Wno-unused-function" was listed twice; deduplicated.
            "-Wno-unused-function",
            "-Wno-unused-private-field",
            "-Wno-unused-variable",
        ],
        deps = [":s2testing"] + deps,
        size = size,
    )
#
# PySNMP MIB module CT-DAWANDEVCONN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CT-DAWANDEVCONN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:28:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): generated pysmi code — `mibBuilder` is injected by the
# pysnmp loader environment, not imported here; do not hand-edit.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
cabletron, = mibBuilder.importSymbols("CTRON-OIDS", "cabletron")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, ModuleIdentity, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Counter32, NotificationType, MibIdentifier, Integer32, iso, Counter64, IpAddress, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ModuleIdentity", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Counter32", "NotificationType", "MibIdentifier", "Integer32", "iso", "Counter64", "IpAddress", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# MIB object tree: ctSSA -> daWanDevConn -> daWanDevConnTable, a table of
# Demand Access remote WAN connections indexed by (device, connection).
ctSSA = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4497))
daWanDevConn = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4497, 23))
daWanDevConnTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1), )
if mibBuilder.loadTexts: daWanDevConnTable.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDevConnTable.setDescription('A list of Demand Access remote WAN connections')
daWanDevConnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1), ).setIndexNames((0, "CT-DAWANDEVCONN-MIB", "daWanDeviceIndex"), (0, "CT-DAWANDEVCONN-MIB", "daWanConnectionIndex"))
if mibBuilder.loadTexts: daWanDevConnEntry.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDevConnEntry.setDescription('An entry containing wan connection information and statistics.')
daWanDeviceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: daWanDeviceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: daWanDeviceIndex.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')
daWanConnectionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4497, 23, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: daWanConnectionIndex.setStatus('mandatory')
if mibBuilder.loadTexts: daWanConnectionIndex.setDescription('This is the index into this table. This index uniquely identifies the connection associated with the device.')
mibBuilder.exportSymbols("CT-DAWANDEVCONN-MIB", daWanDeviceIndex=daWanDeviceIndex, daWanConnectionIndex=daWanConnectionIndex, daWanDevConnEntry=daWanDevConnEntry, daWanDevConn=daWanDevConn, daWanDevConnTable=daWanDevConnTable, ctSSA=ctSSA)
# Repeatedly read a decimal number and print its binary representation;
# entering 0 shows its conversion and ends the loop.


def para_binario(numero):
    """Return the binary representation of a non-negative integer as a
    string, via repeated division by two.

    Replaces the original's convoluted first-iteration special case
    (`if original == number * 2 + mod`) used to seed the digit string.
    """
    if numero == 0:
        return '0'
    digitos = ''
    while numero > 0:
        digitos = str(numero % 2) + digitos
        numero //= 2
    return digitos


if __name__ == '__main__':
    original = 1
    while original != 0:
        original = int(input("Insira um número de no máximo 5 algarismos: "))
        print("Decimal:", original, "\nBinario:", para_binario(original))
class Node(object):
    """Singly-linked-list node holding *item* and a pointer to the next node."""

    def __init__(self, item):
        self.item = item
        self.next = None

    def get_item(self):
        return self.item

    def get_next(self):
        return self.next

    def set_item(self, new_item):
        self.item = new_item

    def set_next(self, new_next):
        self.next = new_next


class LinkedList(object):
    """Singly linked list with insertion at the head."""

    def __init__(self):
        self.head = None

    def is_empty(self):
        """Return True when the list has no nodes."""
        return self.head is None

    def add(self, item):
        """Prepend *item* to the list."""
        temp = Node(item)
        temp.set_next(self.head)
        self.head = temp

    def count_size(self):
        """Return the number of nodes in the list."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.get_next()
        return count

    def search(self, item):
        """Return True if *item* is present in the list."""
        current = self.head
        while current is not None:
            if item == current.get_item():
                return True
            current = current.get_next()
        return False

    def remove(self, item):
        """Unlink the first node holding *item*; no-op when absent.

        Bug fixes vs. the original: `found = true` raised NameError
        (lowercase 'true'), and removing an absent item walked off the end
        of the list and crashed with AttributeError on None.
        """
        current = self.head
        previous = None
        while current is not None and current.get_item() != item:
            previous = current
            current = current.get_next()
        if current is None:
            return  # item not found — leave the list untouched
        if previous is None:
            self.head = current.get_next()
        else:
            previous.set_next(current.get_next())
class Solution(object):
    # Greedy solution (backtracking / top-down DP / bottom-up DP all TLE).
    def canJump(self, nums):
        """
        Walk backwards, tracking the leftmost index from which the end
        is still reachable; the array is winnable iff that index is 0.
        :type nums: List[int]
        :rtype: bool
        """
        goal = len(nums) - 1
        for idx in reversed(range(len(nums))):
            if idx + nums[idx] >= goal:
                goal = idx
        return True if goal == 0 else False


sol = Solution()
# input = [2,3,0,1,4]
input = [3,2,1,0,4]
# (a much longer stress-test input was also tried here)
output = sol.canJump(input)
print('res: ', output)
# The major optimization is to do arithmetic in base 10 in the main loop, avoiding division and modulo def compute(): # Initialize n = 1000000000 # The pattern is greater than 10^18, so start searching at 10^9 ndigits = [0] * 10 # In base 10, little-endian temp = n for i in range(len(ndigits)): ndigits[i] = temp % 10 temp //= 10 n2digits = [0] * 19 # Based on length of pattern temp = n * n for i in range(len(n2digits)): n2digits[i] = temp % 10 temp //= 10 # Increment and search while not is_concealed_square(n2digits): # Add 20n + 100 so that n2digits = (n + 10)^2 add_20n(ndigits, n2digits) add_10pow(n2digits, 2) # Since n^2 ends with 0, n must end with 0 n += 10 add_10pow(ndigits, 1) # Now n2digits = n^2 return str(n) def is_concealed_square(n): for i in range(1, 10): # Scan for 1 to 9 if n[20 - i * 2] != i: return False return n[0] == 0 # Special case for 0 def add_10pow(n, i): while n[i] == 9: n[i] = 0 i += 1 n[i] += 1 def add_20n(n, n2): carry = 0 i = 0 while i < len(n): sum = n[i] * 2 + n2[i + 1] + carry n2[i + 1] = sum % 10 carry = sum // 10 i += 1 i += 1 while carry > 0: sum = n2[i] + carry n2[i] = sum % 10 carry = sum // 10 i += 1 if __name__ == "__main__": print(compute())
class Users:
    """A simple user record that can print a console summary of itself."""

    def __init__(self, first_name='', last_name='', age='', email='', phone=''):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.email = email
        self.phone = phone

    def describe_user(self):
        """Print one labelled line per stored field."""
        for label, value in (
            ('Tu nombre es: ', self.first_name),
            ('Tu apellido es: ', self.last_name),
            ('Tu edad es: ', self.age),
            ('Tu correo es: ', self.email),
            ('Tu numero telefonico es: ', self.phone),
        ):
            print(label, value)

    def greet_user(self):
        """Print a greeting asking the user to confirm their details."""
        print('Hola! ', self.first_name, 'La informacion Proporcionada esta bien?')


usuario_1 = Users('Andres', 'Lopez', '22 años', 'andrelomeza@gmail.com', '8442863199')
usuario_1.describe_user()
usuario_1.greet_user()

usuario_1 = Users('Abril', 'Valenciano', '22 años', 'vacm6996@gmai.com', '8342155323')
usuario_1.describe_user()
usuario_1.greet_user()
def parse_string_time(input_time: str) -> float:
    """Convert a duration string like "1 hr 30 min" into seconds.

    The input is a space-separated sequence of (amount, unit) pairs where
    the unit is one of 'ms', 'sec', 'min' or 'hr'.  Unknown units and
    unparseable amounts contribute 0.
    """
    total_amount = 0
    for raw_amount, duration_type in _slice_input_times(input_time):
        total_amount += _to_float(raw_amount) * _parse_multiplier(duration_type)
    return total_amount


def _parse_multiplier(duration_type):
    """Return the number of seconds in one *duration_type* (0 if unknown)."""
    if 'ms' == duration_type:
        return .001
    if 'sec' == duration_type:
        return 1
    if 'min' == duration_type:
        return 60
    if 'hr' == duration_type:
        return 3600  # bug fix: was 60, i.e. the same as minutes
    return 0


def _slice_input_times(input_time: str) -> iter:
    """Yield consecutive (amount, unit) string pairs from the input."""
    input_time_chunks = iter(input_time.split(' '))
    # Zipping an iterator with itself pairs up consecutive tokens.
    return zip(input_time_chunks, input_time_chunks)


def _to_float(amount: str) -> float:
    """Parse *amount* as a float; malformed input becomes 0.0."""
    try:
        return float(amount)
    except (TypeError, ValueError):
        return 0.0
""" 给你两个单词 word1 和 word2,请你计算出将 word1 转换成 word2 所使用的最少操作数 。 你可以对一个单词进行如下三种操作: 插入一个字符 删除一个字符 替换一个字符   示例 1: 输入:word1 = "horse", word2 = "ros" 输出:3 解释: horse -> rorse (将 'h' 替换为 'r') rorse -> rose (删除 'r') rose -> ros (删除 'e') 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/edit-distance 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。 """ # bottom to top class Solution: def minDistance(self, word1, word2): ''' ''' l1, l2 = len(word1), len(word2) if l1*l2 == 0: return l1+l2 Dp = [[0]*(l2+1) for _ in range(l1+1)] for i in range(l1+1): Dp[i][0] = i for i in range(l2+1): Dp[0][i] = i for i in range(1,l1+1): for j in range(1,l2+1): add_2 = Dp[i-1][j] + 1 cut_2 = Dp[i][j-1] + 1 relate = Dp[i-1][j-1] if word2[j-1] != word1[i-1]: relate += 1 Dp[i][j] = min(add_2, cut_2, relate) return Dp[l1][l2] if __name__ == "__main__": s = Solution() word1 = "horse" word2 = "ros" print(s.minDistance(word1, word2))
"""This module contains j2cl_js_provider helpers.""" load( "@io_bazel_rules_closure//closure:defs.bzl", "CLOSURE_JS_TOOLCHAIN_ATTRS", "closure_js_binary", "create_closure_js_library", "web_library", ) def create_js_lib_struct(j2cl_info, extra_providers = []): return struct( providers = [j2cl_info] + extra_providers, closure_js_library = j2cl_info._private_.js_info.closure_js_library, exports = j2cl_info._private_.js_info.exports, ) def j2cl_js_provider(ctx, srcs = [], deps = [], exports = [], artifact_suffix = ""): """ Creates a js provider from provided sources, deps and exports. """ default_j2cl_suppresses = [ "analyzerChecks", "JSC_UNKNOWN_EXPR_TYPE", ] suppresses = default_j2cl_suppresses + getattr(ctx.attr, "js_suppress", []) js = create_closure_js_library( ctx, srcs, deps, exports, suppresses, convention = "GOOGLE", ) return struct( closure_js_library = js.closure_js_library, exports = js.exports, ) def js_devserver( name, entry_point_defs, deps, dev_resources, **kwargs): """Creates a development server target.""" closure_js_binary( name = name, compilation_level = "BUNDLE", defs = entry_point_defs, deps = deps, # For J2CL it is in impractical to embed all source into sourcemap since # it bloats sourcemaps as well as it slows down bundling. nodefs = ["--source_map_include_content"], **kwargs ) web_library( name = "%s_server" % name, srcs = dev_resources, path = "/", tags = [ "ibazel_live_reload", # Enable ibazel reload server. "ibazel_notify_changes", # Do not to restart the server on changes. ], ) js_binary = closure_js_binary J2CL_JS_TOOLCHAIN_ATTRS = CLOSURE_JS_TOOLCHAIN_ATTRS J2CL_JS_ATTRS = { "js_suppress": attr.string_list(), } JS_PROVIDER_NAME = "closure_js_library" J2CL_OPTIMIZED_DEFS = [ "--define=goog.DEBUG=false", ] # Place holder until we implement unit testing support for open-source. J2CL_TEST_DEFS = []
class Solution(object):
    def getSum(self, a, b):
        """
        Add two integers without `+`/`-` using 32-bit bitwise arithmetic:
        XOR gives the carry-less sum, AND<<1 gives the carries; repeat
        until no carries remain, then undo the unsigned-mask wrap for
        negative results.
        :type a: int
        :type b: int
        :rtype: int
        """
        INT_MAX = 0x7FFFFFFF
        mask = 0xFFFFFFFF
        while b:
            carry = ((a & b) << 1) & mask
            a = (a ^ b) & mask
            b = carry
        # Values above INT_MAX represent negative 32-bit numbers.
        return a if a <= INT_MAX else ~(a ^ mask)
# Integer identifiers for the named car hitbox presets.
HITBOX_OCTANE = 0
HITBOX_DOMINUS = 1
HITBOX_PLANK = 2
HITBOX_BREAKOUT = 3
HITBOX_HYBRID = 4
HITBOX_BATMOBILE = 5
# Reads N M and then N values; prints "Yes" when at least M values are
# at least 1/(4M) of the total, otherwise "No".
N, M = map(int, input().split())
A = list(map(int, input().split()))
need = sum(A) / (4 * M)
popular = sum(1 for a in A if a >= need)
print('Yes' if popular >= M else 'No')
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of *strs* ('' when none).

        After sorting (in place, as before), only the lexicographically
        first and last strings need to be compared: any character shared
        by those two is shared by every string in between.
        """
        if not strs:
            return ""
        strs.sort()
        first, last = strs[0], strs[-1]
        shared = []
        for a, b in zip(first, last):
            if a != b:
                break
            shared.append(a)
        return ''.join(shared)
class Node:
    """A binary tree node."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


class BST:
    """An unbalanced binary search tree; duplicates go to the left."""

    def __init__(self):
        self.root = None

    def add(self, data):
        """Insert *data*, creating the root if the tree is empty."""
        if self.root is None:
            self.root = Node(data)
        else:
            self._add(self.root, data)

    def _add(self, node, data):
        """Recursively descend to the correct leaf position for *data*."""
        side = 'left' if data <= node.data else 'right'
        child = getattr(node, side)
        if child is None:
            setattr(node, side, Node(data))
        else:
            self._add(child, data)

    def same_tree(self, t1, t2):
        """True when the trees rooted at t1 and t2 have identical
        structure and values."""
        if t1 is None and t2 is None:
            return True
        if t1 is None or t2 is None:
            return False
        return (t1.data == t2.data
                and self.same_tree(t1.left, t2.left)
                and self.same_tree(t1.right, t2.right))


def main():
    values = [8, 3, 10, 1, 6, 4, 7, 13, 14]
    first, second = BST(), BST()
    for v in values:
        first.add(v)
        second.add(v)
    print(first.same_tree(first.root, second.root))


if __name__ == '__main__':
    main()
class SymbolMapper(object):
    """Maps numbers to a sign symbol: '+', '-' or '0'."""

    def __init__(self):
        self.symbolmap = {0: '0', 1: '+', -1: '-'}

    @staticmethod
    def normalize(value):
        """Reduce *value* to its sign: 0, or value/|value| (±1)."""
        if value == 0:
            return 0
        return value / abs(value)

    def inputs2symbols(self, inputs):
        """Lazily map each number in *inputs* to its sign symbol."""
        table = self.symbolmap
        return map(lambda v: table[SymbolMapper.normalize(v)], inputs)
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 2.x 3.x
# desc : tuple test
def tuple_test():
    """Demonstrate the differences between tuples and lists."""
    # Difference 1: tuples are written with (), lists with [].
    tp = (1, 'ysoftman', 7, 'abc')
    print('tuple = ', tp)

    # Difference 2: tuples are immutable — `del tp[1]` would raise an
    # error.  Lists, on the other hand, support in-place deletion.
    ls = [1, 'ysoftman', 7, 'abc']
    print('list = ', ls)
    del ls[1]
    print('list = ', ls)


if __name__ == '__main__':
    tuple_test()
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- # (c)2021 .direwolf <kururinmiracle@outlook.com> # Licensed under the MIT License. class AffNoteTypeError(Exception): pass class AffNoteIndexError(Exception): pass class AffNoteValueError(Exception): pass class AffSceneTypeError(Exception): pass class AffReadError(Exception): pass
# Reads integers until the sentinel 999 is entered, then reports how
# many numbers were entered and their sum (the sentinel is excluded).
s = cont = 0
while True:
    n = int(input('Digite um número[999 para finalizar]: '))
    if n == 999:
        break
    s += n
    cont += 1
print(f'Você digitou {cont} números, a soma deles é {s}')
class RC4:
    """RC4 stream cipher whose output escapes non-printable bytes as
    literal '\\xNN' sequences so ciphertext stays copy-pasteable."""

    def __init__(self):
        self.S = []

    def preprocess_hex_chars(self, text):
        """ Preprocess text by decoding hex characters into ASCII characters """
        decoded = ''
        i = 0
        while i < len(text):
            if '\\x' == text[i:i + 2]:
                # Literal backslash-x escape: next two chars are the hex code.
                decoded += chr(int(text[i + 2:i + 4], base=16))
                i += 4
            else:
                decoded += text[i]
                i += 1
        return decoded

    def ksa(self, key):
        """ Key-Scheduling Algorithm
            Initialize S array, and then permute it using a key """
        if len(key) < 2:
            raise Exception("Key must be at least 2 characters long")
        self.S = list(range(256))
        j = 0
        for i in range(256):
            j = (j + self.S[i] + ord(key[i % len(key)])) % 256
            self.S[i], self.S[j] = self.S[j], self.S[i]

    def prga(self, plaintext):
        """ Pseudo-Random Generation Algorithm
            Generate keystream by swapping S[i] and S[j], then summing them """
        if len(plaintext) == 0:
            raise Exception("Plaintext cannot be empty")
        stream = []
        i = j = 0
        for _ in range(len(plaintext)):
            i = (i + 1) % 256
            j = (j + self.S[i]) % 256
            self.S[i], self.S[j] = self.S[j], self.S[i]
            stream.append(chr(self.S[(self.S[i] + self.S[j]) % 256]))
        return ''.join(stream)

    def encrypt(self, plaintext, key):
        """ Encrypt plaintext by given key using RC4 algorithm """
        if len(plaintext) == 0:
            raise Exception("Plaintext cannot be empty")
        if len(key) < 2:
            raise Exception("Key must be at least 2 characters long")
        self.ksa(key)
        keystream = self.prga(plaintext)
        ciphertext = ''
        for k, p in zip(keystream, plaintext):
            c = chr(ord(k) ^ ord(p))
            ciphertext += c if c.isprintable() else r'\x{0:02x}'.format(ord(c))
        return ciphertext

    def decrypt(self, ciphertext, key):
        """ Decrypt ciphertext by given key using RC4 algorithm """
        if len(ciphertext) == 0:
            raise Exception("Ciphertext cannot be empty")
        if len(key) < 2:
            raise Exception("Key must be at least 2 characters long")
        raw = self.preprocess_hex_chars(ciphertext)
        self.ksa(key)
        keystream = self.prga(raw)
        plaintext = ''
        for k, c in zip(keystream, raw):
            p = chr(ord(k) ^ ord(c))
            plaintext += p if p.isprintable() else r'\x{0:02x}'.format(ord(p))
        return plaintext
# Ignore file list ignore_filelist = [ 'teslagun.activeitem', 'teslagun2.activeitem', ] ignore_filelist_patch = [ ]
""" Date : Avril 2020 Projet : MOOC Python 3 - France Université Numérique Objectif: Le Petit Prince vient de débarquer sur la planète U357, et il apprend qu'il peut y voir de belles aurores boréales ! La planète U357 a deux soleils : les étoiles E1515 et E666. C'est pour cela que les tempêtes magnétiques sont permanentes, ce qui est excellent pour avoir des aurores boréales. Par contre, il y fait souvent jour, sauf bien évidemment quand les deux soleils sont couchés en même temps. Heureusement pour nous, une journée U357 s'écoule sur 24 heures comme sur notre Terre, et pour simplifier, nous ne prendrons pas en compte les minutes (on ne donne que les heures avec des valeurs entières entre 0 et 23). Nous vous demandons d'aider le Petit Prince à déterminer les périodes de jour et de nuit. UPYLAB 4.2B Il vous faut maintenant écrire un programme qui lit en entrée : . l'heure de lever du soleil E1515 . l'heure du coucher du soleil E1515 . l'heure de lever du soleil E666 . l'heure du coucher du soleil E666 et qui utilise la fonction soleil_leve pour afficher ligne par ligne chacune des heures de la journée, depuis 0 jusqu'à 23, suivies d'une espace et d'une astérisque s'il fait nuit à cette heure. Attention, il ne fera nuit que si E1515 et E666 sont tous deux couchés. Consignes: N'oubliez pas d'insérer votre fonction soleil_leve. Attention, nous rappelons que votre code sera évalué en fonction de ce qu’il affiche, donc veillez à n’imprimer que le résultat attendu. En particulier, il ne faut rien écrire à l’intérieur des appels à input (int(input())et non int(input("Entrer un nombre : ")) par exemple), ni ajouter du texte dans ce qui est imprimé (print(res) et non print("résultat :", res) par exemple). 
""" def soleil_leve(lever, coucher, heure): cas1 = lever == coucher == 0 cas2 = lever <= heure < coucher cas3 = coucher < lever and (heure < coucher or lever <= heure) return cas1 or cas2 or cas3 lE1515 = int(input()) # lever_e1515 cE1515 = int(input()) # coucher_e1515 lE666 = int(input()) # lever_e666 cE666 = int(input()) # coucher_e666 for heure in range(24): if soleil_leve(lE1515, cE1515, heure) or soleil_leve(lE666, cE666, heure): print(heure) else: print(heure, '*') """ Avec les données lues suivantes : 6 18 10 21 0 * 1 * 2 * 3 * 4 * 5 * 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 * 22 * 23 """
def make_abba(a, b):
    """Return the two strings arranged as abba.

    >>> make_abba('Hi', 'Bye')
    'HiByeByeHi'
    """
    return ''.join((a, b, b, a))
# This is input for <FileUploadToCommons.py> that actually writes the content to Wikimedia commons using the API
# See https://pypi.org/project/mwtemplates/
# ===============BEGIN TEMPLETE======================
# A minimally filled {{Information}} template —
# https://commons.wikimedia.org/wiki/Template:Information
fileTemplate = """
=={{{{int:filedesc}}}}==
{{{{Information
|author = {author}
|description = {{{{en|1=Page {page} of the Album amicorum Jacob Heyblocq KB131H26}}}}
|source = https://resolver.kb.nl/resolve?urn=EuropeanaTravel:131H26:{page}
}}}}

=={{{{int:license-header}}}}==
{{{{Koninklijke Bibliotheek}}}}
{{{{PD-art|PD-old-70-1923}}}}

[[Category:Album amicorum van Jacobus Heyblocq]]
"""
# ==============END TEMPLATE====================


def writeFileTemplate(dataframe, rowdict):
    """Render the Commons file-page wikitext for one spreadsheet row.

    *rowdict* (one Excel row as a dict) must provide 'page' and
    'contributorname'; whitespace around the contributor name is
    stripped.  *dataframe* is accepted for interface compatibility but
    not used here.
    """
    page = rowdict['page']
    author = rowdict['contributorname'].strip()
    return fileTemplate.format(page=page, author=author)
# A function used as a parameter of another function:
# the number is printed only when the predicate accepts it.
def imprime_com_condicao(num, fcond):
    """Print *num* only when fcond(num) is truthy."""
    if fcond(num):
        print(num)


def par(x):
    """Return True when *x* is even."""
    return x % 2 == 0


def impar(x):
    """Return True when *x* is odd."""
    return not par(x)


# Main program
# 5 is odd, so nothing is printed here.
imprime_com_condicao(5, par)
# Bubble-sorts the array while counting swaps, then reports the swap
# count and the first/last elements (HackerRank "Sorting: Bubble Sort").
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))

swaps = 0
for _ in range(n):
    for j in range(n - 1):
        if a[j] > a[j + 1]:
            a[j], a[j + 1] = a[j + 1], a[j]
            swaps += 1

# The original printed the same three lines in both the sorted and
# unsorted branches, so a single set of prints is equivalent.
print("Array is sorted in", swaps, "swaps.")
print("First Element:", a[0])
print("Last Element:", a[-1])
# For each of t test cases, reads r and prints "Case k:" followed by the
# four corners (UL, UR, LR, LL) of a rectangle derived from r, with each
# coordinate rounded to an integer.
t = int(input())
for case in range(1, t + 1):
    r = int(input())
    length = r * 5
    half_h = (length * 0.6) / 2
    left = -1 * length * 0.45
    right = length * 0.55
    print('Case ' + str(case) + ':')
    for x, y in ((left, half_h), (right, half_h),
                 (right, -1 * half_h), (left, -1 * half_h)):
        print("%.0f %.0f" % (x, y))
# Decode a Caesar-shifted message: a translation table maps "yzab...x"
# onto "abc...z", i.e. shifts every letter two places forward.
myString = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
inTab = "yzabcdefghijklmnopqrstuvwx"
outTab = "abcdefghijklmnopqrstuvwxyz"
# str.maketrans builds the per-character mapping; translate applies it
# in a single pass.
transTab = str.maketrans(inTab, outTab)
outString = myString.translate(transTab)
print(outString)

# The decoded message says to apply the same shift to the URL part.
urlStr = "map"
outUrl = urlStr.translate(transTab)
print(outUrl)
class unitConversion():
    """A linear unit conversion: SI value = scale * value + offset."""

    def __init__(self, scale, offset) -> None:
        self.scale = scale
        self.offset = offset

    def convertToSI(self, upper=True, isComposite=False):
        """Return [scale, offset] for converting TO SI.

        upper: the unit sits in the numerator; a denominator unit is the
        reciprocal conversion.  isComposite: offsets are dropped because
        they are meaningless inside a composite unit (e.g. C/s).
        """
        if upper:
            if isComposite:
                return [self.scale, 0]
            else:
                return [self.scale, self.offset]
        else:
            return self.convertFromSI(not upper, isComposite)

    def convertFromSI(self, upper=True, isComposite=False):
        """Return [scale, offset] for converting FROM SI (inverse map)."""
        if upper:
            if isComposite:
                return [1 / self.scale, 0]
            else:
                return [1 / self.scale, -self.offset / self.scale]
        else:
            return self.convertToSI(not upper, isComposite)


class unit():
    """Registry of known units (by their SI base form) with conversion,
    multiplication, division and exponentiation of composite unit
    strings such as 'kg-m/s2'."""

    def __init__(self) -> None:
        # Each dict maps a unit symbol to its conversion into the SI
        # unit named by the key of self.units below.
        unit = {
            '1': unitConversion(1, 0)
        }
        force = {
            'N': unitConversion(1, 0)
        }
        mass = {
            'g': unitConversion(1 / 1000, 0)
        }
        energy = {
            'J': unitConversion(1, 0),
        }
        effect = {
            'W': unitConversion(1, 0)
        }
        pressure = {
            'Pa': unitConversion(1, 0),
            'bar': unitConversion(1e5, 0)
        }
        temperature = {
            'K': unitConversion(1, 0),
            'C': unitConversion(1, 273.15),
            'F': unitConversion(5 / 9, 273.15 - 32 * 5 / 9)
        }
        time = {
            's': unitConversion(1, 0),
            'min': unitConversion(60, 0),
            'h': unitConversion(60 * 60, 0),
            'yr': unitConversion(60 * 60 * 24 * 365, 0)
        }
        volume = {
            'm3': unitConversion(1, 0),
            'L': unitConversion(1 / 1000, 0)
        }
        length = {
            'm': unitConversion(1, 0)
        }
        # Keyed by the SI composite form each family reduces to.
        self.units = {
            'kg-m/s2': force,
            'kg/m-s2': pressure,
            's': time,
            'K': temperature,
            'm3': volume,
            'm': length,
            'kg-m2/s2': energy,
            'kg-m2/s3': effect,
            'kg': mass,
            '1': unit
        }
        # SI prefixes and their multipliers.
        self.prefixes = {
            'µ': 1e-6,
            'm': 1e-3,
            'k': 1e3,
            'M': 1e6
        }

    def convertToSI(self, value, unit):
        """Convert *value* expressed in composite *unit* into SI.

        Returns (converted value, SI composite unit string).
        """
        upper, lower = self.splitCompositeUnit(unit)
        # A lone numerator unit may keep its offset (e.g. C -> K).
        isComposite = not (len(lower) == 0 and len(upper) == 1)

        unitUpper = []
        unitLower = []

        # Convert each numerator unit; expand its SI form (with
        # exponents multiplied through) into numerator/denominator lists.
        for unit in upper:
            conversion, u, exp = self.convert(unit, toSI=True, upper=True, isComposite=isComposite)
            for _ in range(exp):
                value = value * conversion[0] + conversion[1]
            siUpper, siLower = self.splitCompositeUnit(u)
            siUpperExp = []
            siLowerExp = []
            for i, up in enumerate(siUpper):
                u, siExp = self.removeExponentFromUnit(up)
                siUpper[i] = u
                siUpperExp.append(siExp * exp)
            for i, low in enumerate(siLower):
                u, siExp = self.removeExponentFromUnit(low)
                siLower[i] = u
                siLowerExp.append(siExp * exp)
            for up, upExp in zip(siUpper, siUpperExp):
                if upExp != 1:
                    up += str(upExp)
                unitUpper.append(up)
            for low, lowExp in zip(siLower, siLowerExp):
                if lowExp != 1:
                    low += str(lowExp)
                unitLower.append(low)

        # Same for denominator units, but their SI numerators land in the
        # overall denominator and vice versa.
        for unit in lower:
            conversion, u, exp = self.convert(unit, toSI=True, upper=False, isComposite=isComposite)
            for _ in range(exp):
                value = value * conversion[0] + conversion[1]
            siUpper, siLower = self.splitCompositeUnit(u)
            siUpperExp = []
            siLowerExp = []
            for i, up in enumerate(siUpper):
                u, siExp = self.removeExponentFromUnit(up)
                siUpper[i] = u
                siUpperExp.append(siExp * exp)
            for i, low in enumerate(siLower):
                u, siExp = self.removeExponentFromUnit(low)
                siLower[i] = u
                siLowerExp.append(siExp * exp)
            for up, upExp in zip(siUpper, siUpperExp):
                if upExp != 1:
                    up += str(upExp)
                unitLower.append(up)
            for low, lowExp in zip(siLower, siLowerExp):
                if lowExp != 1:
                    low += str(lowExp)
                unitUpper.append(low)

        # Flatten any nested composite fragments (an entry itself may
        # contain a slash) into pure numerator/denominator lists.
        upperUpper = []
        upperLower = []
        lowerUpper = []
        lowerLower = []
        for u in unitUpper:
            up, low = self.splitCompositeUnit(u)
            upperUpper += up
            upperLower += low
        for u in unitLower:
            up, low = self.splitCompositeUnit(u)
            lowerUpper += up
            lowerLower += low
        unitUpper = upperUpper + lowerLower
        unitLower = upperLower + lowerUpper

        # cancle out upper and lower
        unitUpper, unitLower = self.cancleUnits(unitUpper, unitLower)

        # combine the upper and lower
        outUnit = self.combineUpperAndLower(unitUpper, unitLower)

        return value, outUnit

    def convertFromSI(self, value, unit):
        """Convert SI *value* into composite *unit*; returns (value, unit)."""
        upper, lower = self.splitCompositeUnit(unit)
        isComposite = not (len(lower) == 0 and len(upper) == 1)
        for u in upper:
            conversion, u, exp = self.convert(u, toSI=False, upper=True, isComposite=isComposite)
            for _ in range(exp):
                value = value * conversion[0] + conversion[1]
        for u in lower:
            conversion, u, exp = self.convert(u, toSI=False, upper=False, isComposite=isComposite)
            for _ in range(exp):
                value = value * conversion[0] + conversion[1]
        return value, unit

    def splitCompositeUnit(self, compositeUnit):
        """Split 'a-b/c-d' into (['a','b'], ['c','d']); validates syntax."""
        special_characters = """!@#$%^&*()+?_=.,<>\\"""
        if any(s in compositeUnit for s in special_characters):
            raise ValueError('The unit can only contain slashes (/), hyphens (-)')

        # remove spaces
        compositeUnit = compositeUnit.replace(' ', '')

        slash = '/'
        if slash in compositeUnit:
            index = compositeUnit.find('/')
            upper = compositeUnit[0:index]
            lower = compositeUnit[index + 1:]

            # check for multiple slashes
            if slash in upper or slash in lower:
                raise ValueError('A unit can only have a single slash (/)')

            # split the upper and lower
            upper = upper.split('-')
            lower = lower.split('-')
        else:
            upper = compositeUnit.split('-')
            lower = []

        return upper, lower

    def removeExponentFromUnit(self, unit):
        """Split 'm3' into ('m', 3); a bare unit gets exponent 1."""
        # find any integers in the unit
        num = []
        num_indexes = []
        for i, s in enumerate(unit):
            if s.isdigit():
                num.append(s)
                num_indexes.append(i)

        # determine if all integers are placed consequtively
        for i in range(len(num_indexes) - 1):
            elem_curr = num_indexes[i]
            elem_next = num_indexes[i + 1]
            if not elem_next == elem_curr + 1:
                raise ValueError('All numbers in the unit has to be grouped together')

        # determien if the last integer is placed at the end of the unit
        if len(num) != 0:
            if max(num_indexes) != len(unit) - 1:
                raise ValueError('Any number has to be placed at the end of the unit')

        # remove the inters from the unit
        if len(num) != 0:
            for i in reversed(num_indexes):
                unit = unit[0:i] + unit[i + 1:]

        # combine the exponent
        if len(num) != 0:
            exponent = int(''.join(num))
        else:
            exponent = 1

        # check if the intire unit has been removed by the integers.
        if len(unit) == 0:
            # check if the exponent is equal to 1
            if exponent == 1:
                unit = '1'
        return unit, exponent

    def convert(self, unit, toSI=True, upper=True, isComposite=False):
        """Look up *unit* (optionally with an SI prefix) and return
        ([scale, offset], SI composite string, exponent)."""
        unit, exponent = self.removeExponentFromUnit(unit)

        # search for the unit
        isFound = False
        for siUnit, unitDict in self.units.items():
            if unit in unitDict:
                conversion = unitDict[unit]
                isFound = True
                break

        # check if the unit is found
        if isFound:
            # retrun the conversion if it is found
            if toSI:
                out = conversion.convertToSI(upper, isComposite)
            else:
                out = conversion.convertFromSI(upper, isComposite)
            # the unti was found without looking for the prefix. Therefore the prefix must be 1
            prefix = 1
        else:
            # The unit was not found. This must be because the unit has a prefix
            prefix = unit[0:1]
            unit = unit[1:]

            if prefix not in self.prefixes:
                raise ValueError(f'The unit ({prefix}{unit}) was not found. Therefore it was interpreted as a prefix and a unit. However the prefix ({prefix}) was not found')

            # look for the unit without the prefix
            isFound = False
            for siUnit, unitDict in self.units.items():
                if unit in unitDict:
                    conversion = unitDict[unit]
                    isFound = True
                    break

            # check if the unit was found
            if not isFound:
                raise ValueError(f'The unit ({prefix}{unit}) was not found. Therefore it was interpreted as a prefix and a unit. However the unit ({unit}) was not found')

            # create the conversion
            if toSI:
                out = conversion.convertToSI(upper, isComposite)
            else:
                out = conversion.convertFromSI(upper, isComposite)

            # The prefix is inverted if the conversion is not to SI
            # (and again for denominator units).
            prefix = self.prefixes[prefix]
            if not upper:
                prefix = 1 / prefix
            if not toSI:
                prefix = 1 / prefix
        out[0] *= prefix
        return out, siUnit, exponent

    def divide(self, unit1, unit2):
        """Return the composite unit string for unit1 / unit2."""
        # determine the upper and lower units of unit 2
        upperUnit2, lowerUnit2 = self.splitCompositeUnit(unit2)

        # flip unit 2, then multiply
        lowerUnit2, upperUnit2 = upperUnit2, lowerUnit2

        unit2 = ''
        if len(upperUnit2) != 0:
            unit2 += '-'.join(upperUnit2)
        else:
            unit2 += '1'
        if len(lowerUnit2) != 0:
            if len(lowerUnit2) == 1:
                if lowerUnit2[0] == '1':
                    pass
                else:
                    unit2 += '/' + '-'.join(lowerUnit2)
            else:
                unit2 += '/' + '-'.join(lowerUnit2)

        return self.multiply(unit1, unit2)

    def multiply(self, unit1, unit2):
        """Return the composite unit string for unit1 * unit2."""
        # determine the upper and lower units of unit 1
        upperUnit1, lowerUnit1 = self.splitCompositeUnit(unit1)

        # determine the upper and lower units of unit 2
        upperUnit2, lowerUnit2 = self.splitCompositeUnit(unit2)

        # determine the combined upper and lower unit
        upper = upperUnit1 + upperUnit2
        lower = lowerUnit1 + lowerUnit2

        # cancle the upper and lower
        upper, lower = self.cancleUnits(upper, lower)

        # combine the upper and lower
        u = self.combineUpperAndLower(upper, lower)
        return u

    def power(self, unit1, power):
        """Return the composite unit string for unit1 ** power."""
        # determine the upper and lower units
        upperUnit1, lowerUnit1 = self.splitCompositeUnit(unit1)

        # increase the exponent of the upper ('1' is dimensionless) ...
        if upperUnit1[0] != '1':
            for i in range(len(upperUnit1)):
                u = upperUnit1[i]
                u, exponent = self.removeExponentFromUnit(u)
                exponent *= power
                u = u + str(int(exponent))
                upperUnit1[i] = u
        # ... and of the lower
        for i in range(len(lowerUnit1)):
            u = lowerUnit1[i]
            u, exponent = self.removeExponentFromUnit(u)
            exponent *= power
            u = u + str(int(exponent))
            lowerUnit1[i] = u

        # combine the upper and lower
        u = self.combineUpperAndLower(upperUnit1, lowerUnit1)
        return u

    def cancleUnits(self, upper, lower):
        """Cancel common factors between numerator and denominator lists
        and re-collect repeated units into exponent form."""
        # replace units with exponents with multiple occurances of the unit in upper
        unitsToRemove = []
        unitsToAdd = []
        for up in upper:
            u, e = self.removeExponentFromUnit(up)
            if e != 1:
                unitsToRemove.append(up)
                unitsToAdd += [u] * e
        for u in unitsToRemove:
            upper.remove(u)
        for u in unitsToAdd:
            upper.append(u)

        # replace units with exponents with multiple occurances of the unit in lower
        unitsToRemove = []
        unitsToAdd = []
        for low in lower:
            u, e = self.removeExponentFromUnit(low)
            if e != 1:
                unitsToRemove.append(low)
                unitsToAdd += [u] * e
        for u in unitsToRemove:
            lower.remove(u)
        for u in unitsToAdd:
            lower.append(u)

        # cancle the upper and lower units pair by pair
        unitsToRemove = []
        done = False
        while not done:
            done = True
            for low in lower:
                if low in upper:
                    upper.remove(low)
                    lower.remove(low)
                    done = False
            if done:
                break

        # remove '1' placeholders when other units remain
        if len(upper) > 1:
            if '1' in upper:
                upper.remove('1')
        if len(lower) > 1:
            if '1' in lower:
                lower.remove('1')

        # determine the exponents of each unit in the upper
        upperWithExponents = []
        if len(upper) != 0:
            done = False
            while not done:
                up = upper[0]
                exponent = upper.count(up)
                if exponent != 1:
                    upperWithExponents.append(up + str(exponent))
                else:
                    upperWithExponents.append(up)
                upper = list(filter((up).__ne__, upper))
                if len(upper) == 0:
                    done = True

        # determine the exponents of each unit in the lower
        lowerWithExponents = []
        if len(lower) != 0:
            done = False
            while not done:
                low = lower[0]
                exponent = lower.count(low)
                if exponent != 1:
                    lowerWithExponents.append(low + str(exponent))
                else:
                    lowerWithExponents.append(low)
                lower = list(filter((low).__ne__, lower))
                if len(lower) == 0:
                    done = True

        return upperWithExponents, lowerWithExponents

    def combineUpperAndLower(self, upper, lower):
        """Join numerator/denominator lists back into 'a-b/c-d' form."""
        # combine the upper and lower
        u = ''
        if len(upper) != 0:
            u += '-'.join(upper)
        else:
            u += '1'
        if len(lower) != 0:
            if len(lower) == 1:
                if lower[0] == '1':
                    pass
                else:
                    u += '/' + '-'.join(lower)
            else:
                u += '/' + '-'.join(lower)
        return u
def sortByHeight(a):
    """Sort the non-tree values of *a* ascending, in place, leaving every
    -1 ("tree") at its original index; returns the same list object."""
    ordered = iter(sorted(x for x in a if x != -1))
    for i, x in enumerate(a):
        if x != -1:
            a[i] = next(ordered)
    return a
# -*- coding: utf-8 -*- """ Created on Sat Feb 29 09:54:51 2020 @author: bruger """
class Solution:
    def reverseWords(self, set):
        """Return the words of the input in reverse order, collapsing all
        runs of whitespace to single spaces.

        NOTE(review): the parameter name shadows the built-in `set`; it is
        kept unchanged for caller compatibility.
        """
        words = set.split()
        words.reverse()
        return ' '.join(words)


if __name__ == "__main__":
    solution = Solution()
    print(solution.reverseWords("the sky is blue"))
    print(solution.reverseWords("  hello world!  "))
class SleuthError(Exception):
    """Base class for all sleuth-specific errors."""
    pass


class SleuthNotFoundError(SleuthError):
    """Raised when a requested sleuth resource cannot be found."""
    pass
def insertion_sort(nums: list[float]) -> list[float]:
    """Sort *nums* ascending in place (insertion sort) and return it.

    Bug fix: the bounds check `index > 0` now runs BEFORE the element
    comparison.  Previously `nums[index - 1]` was evaluated first, so at
    index 0 it wrapped around and compared against nums[-1] (harmless
    only because `and` short-circuited afterwards, but fragile).
    """
    for start in range(1, len(nums)):
        index = start
        # Bubble the new element left until it is in order.
        while index > 0 and nums[index] < nums[index - 1]:
            nums[index], nums[index - 1] = nums[index - 1], nums[index]
            index -= 1
    return nums
# coding:utf-8


class FakeBot(object):
    """Test double for a bot: records the last text 'sent' instead of
    performing any network call."""

    def __init__(self):
        self.msg = ''

    def send_message(self, text='', **kwargs):
        """Remember *text*; extra keyword arguments are ignored."""
        self.msg = text


class FakeMessage(object):
    """Test double for an incoming message; chat_id is always 1."""

    def __init__(self):
        self.chat_id = 1


class FakeUpdate(object):
    """Test double for an update wrapping a single FakeMessage."""

    def __init__(self):
        self.message = FakeMessage()
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Edge styling applied to every node this parser adds.
bing_edge = {
    'color': {
        'color': '#228372'
    },
    'title': 'Bing-related Parsing Functions',
    'label': 'B'
}


def run(unfurl, node):
    """Annotate Bing URL query parameters (pq, q, first) with descriptor
    nodes explaining what each value means."""
    if node.data_type != 'url.query.pair':
        return
    if 'bing' not in unfurl.find_preceding_domain(node):
        return

    if node.key == 'pq':
        value = f'"Previous" Search Query: {node.value}'
        hover = ('Previous terms entered by the user; auto-complete or suggestions <br>'
                 'may have been used to reach the actual search terms (in <b>q</b>)')
    elif node.key == 'q':
        value = f'Search Query: {node.value}'
        hover = 'Terms used in the Bing search'
    elif node.key == 'first':
        value = f'Starting Result: {node.value}'
        hover = ('Bing search by default shows 8 results per page; higher <br>'
                 '"first" values may indicate browsing more subsequent results pages.')
    else:
        return

    unfurl.add_to_queue(
        data_type='descriptor', key=None, value=value, hover=hover,
        parent_id=node.node_id, incoming_edge_config=bing_edge)
"""Module for user fixtures""" USER = { 'email': 'test_user@example.com', 'password': 'Password@1234', } USER_INVALID = {'email': '', 'password': ''} SUPERUSER = { 'email': 'test_userII@example.com', 'password': 'password1234', } UNREGISTERED_USER = { 'email': 'unregistered@example1.com', 'password': 'Password@1234' } TEST_AUTH_USER = { 'email': 'test_auth_user@example.com', 'password': 'Password@12345', }
class Term(str):
    """An atomic symbol. Variables start with '?'; everything else is constant.

    NOTE(review): both properties index ``self[0]`` and will raise IndexError
    on an empty Term — presumably the tokenizer never produces one; confirm.
    """

    @property
    def is_variable(self):
        return self[0] == "?"

    @property
    def is_constant(self):
        return self[0] != "?"

    @property
    def arity(self):
        # Atomic terms take no arguments.
        return 0


class ListTerm(tuple):
    """A compound term: an immutable sequence of Terms and/or ListTerms."""

    def __init__(self, *args):
        # Lazily-computed cache for function(); None means "not decided yet".
        self.is_function = None
        tuple.__init__(self, *args)

    def function(self):
        """Return True when every element is an atomic Term (one level deep only)."""
        if self.is_function is None:
            self.is_function = True
            for term in self:
                if not isinstance(term, Term):
                    self.is_function = False
                    break
        return self.is_function

    @property
    def is_constant(self):
        """True when all elements are constant (recursive definition)."""
        for term in self:
            if not term.is_constant:
                return False
        return True

    @property
    def arity(self):
        return len(self)

    def __str__(self):
        # Render in Lisp-like s-expression form, e.g. "(f ?x a)".
        return "(%s)" % " ".join(str(x) for x in self)
    __repr__ = __str__

###############################################################################

def is_function(term):
    """True iff term is a one-level-deep ListTerm of atomic Terms."""
    if isinstance(term, ListTerm):
        return term.function()
    return False

###############################################################################

def tokenize(s):
    """Split an s-expression string into '(' / ')' / symbol tokens."""
    return s.replace('(', ' ( ').replace(')', ' ) ').split()


class SymbolFactory(object):
    """Creates and interns Term/ListTerm instances so equal symbols are shared."""

    def __init__(self):
        # Two-level pool: class -> (args tuple -> cached instance).
        self.symbol_pool = dict()

    def create(self, clz, *args):
        """Return the interned clz(*args), creating and caching it on first use."""
        # make args hashable
        new_args = []
        for arg in args:
            if isinstance(arg, list):
                arg = tuple(arg)
            new_args.append(arg)
        args = tuple(new_args)

        # symbol[clz] -> clz_pool[args] -> instance
        try:
            clz_pool = self.symbol_pool[clz]
        except KeyError:
            clz_pool = self.symbol_pool[clz] = {}
        try:
            instance = clz_pool[args]
        except KeyError:
            instance = clz_pool[args] = clz(*args)
        return instance

    def to_symbols(self, s):
        """Parse a string of s-expressions, yielding interned top-level symbols.

        Text after ';' on a line is treated as a comment and discarded.
        """
        stack = []
        current_list = []

        # strip comment lines
        lines = []
        for line in s.splitlines():
            line = line.strip()
            useful = line.split(";")
            line = useful[0].strip()
            if line:
                lines.append(line)
        s = " ".join(lines)

        for token in tokenize(s):
            if token == '(':
                # Start a nested list; remember where we came from.
                stack.append(current_list)
                current_list = []
            elif token == ')':
                # Close the nested list and append it to its parent.
                list_term = self.create(ListTerm, current_list)
                current_list = stack.pop()
                current_list.append(list_term)
            else:
                current_list.append(self.create(Term, token))

        for sexpr in current_list:
            assert isinstance(sexpr, (Term, ListTerm))
            yield sexpr

    def symbolize(self, string):
        """Take exactly one symbol as a string and return its interned form."""
        line = list(self.to_symbols(string))
        assert len(line) == 1
        return line[0]
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

def f_gold(a, b, k):
    """Compare the gap strictly between a and b against the combined gap
    outside [a, b] on a line of k positions; return the smaller, or 0 on a tie.
    (Same contract as the original reference implementation.)
    """
    inner_gap = (b - a) - 1
    outer_gap = (k - b) + (a - 1)
    return 0 if inner_gap == outer_gap else min(inner_gap, outer_gap)


#TOFILL

if __name__ == '__main__':
    param = [
        (83, 98, 86,),
        (3, 39, 87,),
        (11, 96, 30,),
        (50, 67, 48,),
        (40, 16, 32,),
        (62, 86, 76,),
        (40, 78, 71,),
        (66, 11, 74,),
        (6, 9, 19,),
        (25, 5, 5,),
    ]
    # Count the parameter sets where the candidate matches the reference.
    n_success = sum(
        1 for parameters_set in param
        if f_filled(*parameters_set) == f_gold(*parameters_set)
    )
    print("#Results: %i, %i" % (n_success, len(param)))
"""Exceptions of the pygmx package.""" class InvalidMagicException(Exception): pass class InvalidIndexException(Exception): pass class UnknownLenError(Exception): pass class FileTypeError(Exception): pass class XTCError(Exception): pass
def get_locale_name(something, lang):
    """Resolve a locale-specific name for *something* in language *lang*.

    NOTE(review): this is an unimplemented stub — every branch is a
    placeholder and the function always returns ``None``. The branches hint
    that str, list, and dict inputs will need distinct handling.

    Fixes over the original: ``type(x) == T`` comparisons replaced with the
    idiomatic ``isinstance`` checks (also honors subclasses), the mutually
    exclusive branches chained with ``elif``, and the ``None`` result made
    explicit.
    """
    if isinstance(something, str):
        pass  # TODO: handle a plain string name
    elif isinstance(something, list):
        pass  # TODO: handle a list of candidate names
    elif isinstance(something, dict):
        pass  # TODO: handle a lang -> name mapping
    return None
# Phone-book lookup: read N name/number pairs from stdin, then answer N
# queries, printing "name=number" or "Not found".
#
# Fixes over the original: redundant `in dict.keys()` membership test and the
# double dictionary lookup replaced with a single `dict.get` call.
phone_book = {}
n = int(input())
for _ in range(n):
    name, number = input().split()
    phone_book[name] = number

for _ in range(n):
    query_name = input()
    number = phone_book.get(query_name)
    if number is None:
        print("Not found")
    else:
        print(query_name + "=" + number)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Control.py
#


class Control:
    """A server-side proxy for a DOM element.

    Before the control is hosted by a Form, every operation is accumulated as
    JavaScript text in ``self._script``; once hosted, operations are forwarded
    to the parent Form, which runs them against the live DOM element by id.
    """

    def __init__(self, TagName):
        # TagName: the HTML tag to create (e.g. "div", "button").
        self._hosted = False  # Is added to Form
        self._script = ''  # Javascript code (Used before added to Form)
        self._id = ''  # Used By Form to identify control Dont change
        self._script += 'var Control = document.createElement("' \
            + TagName + '");'
        self.events = {}  # event type -> python callback
        self.Controls = []  # children queued until this control is hosted
        pass

    # Below function is only used by Form !

    def initialize(self, Id, parent):
        """Called by the Form when hosting this control; returns the pending
        creation script with the assigned element id appended."""
        self._id = Id
        self.parent = parent
        self._hosted = True
        return self._script + 'Control.id = "' + self._id + '";'

    def child_handler(self):
        """Flush queued children: add each to the Form and attach it in the DOM."""
        for Control in self.Controls:
            self.parent.Add(Control)
            self.send('appendChild(document.getElementById("' + Control.Id + '"));')

    # NOTE(review): this getter takes an extra ``Id`` argument and reads/writes
    # ``self.Id`` (which would recurse); it is immediately superseded by the
    # ``@Id.getter`` below, so this body appears to be dead code — confirm.
    @property
    def Id(self, Id=None):
        if self.Id == None:
            self.Id = Id
        return self.Id

    @Id.getter
    def Id(self):
        return self._id

    def send(self, message, evaluate=False):
        """This will change script(Javascript)

        When hosted, the message is executed against the live element (run or,
        if ``evaluate`` is True, evaluated for a result). When not yet hosted,
        it is buffered onto ``self._script`` prefixed with 'Control.'.
        """
        if self._hosted == True:  # check if control is added to Form
            if evaluate:
                return self.parent.evaluate('document.getElementById("' + self._id + '").' + message)
            else:
                self.parent.run('document.getElementById("' + self._id + '").' + message)
        else:
            self._script += 'Control.'
            self._script += message

    def run(self, script):
        # Fire-and-forget JavaScript on this element.
        self.send(script)

    def evaluate(self, script):
        # JavaScript on this element whose result is wanted.
        self.send(script, True)

    # NOTE(review): two methods named ``innerHTML`` are defined; the second
    # definition shadows the first, so the getter variant below is
    # unreachable — likely a getter/setter pair was intended.
    def innerHTML(self, html):
        """The innerHTML property sets or returns the HTML content (inner HTML) of an element."""
        self.send('innerHTML;', True)

    def innerHTML(self, html):
        self.send('innerHTML="' + html + '";')

    def setAttribute(self, attributename, attributevalue):
        """The setAttribute() method adds the specified attribute to an element, and gives it the specified value."""
        self.send('setAttribute("' + attributename + '", "' + attributevalue + '");')

    def addEventListener(
        self,
        event,
        function,
        useCapture=False,
        ):
        """The addEventListener() method attaches an event handler to the specified element."""
        # Remember the python callback; the browser side calls notify_server,
        # which is expected to route back into fire_event.
        self.events[str(event)] = function
        self.send('addEventListener("' + event + '", notify_server, ' + str(useCapture).lower() + ');')

    def fire_event(self, EventData):
        """Dispatch a browser event (dict with a 'type' key) to the matching
        registered python callback."""
        for event in self.events:
            if event == EventData['type']:
                f = self.events[event]
                f(self, EventData)

    def Width(self, w):
        # Sets the element's width attribute.
        self.send('width="' + str(w) + '";')

    def Height(self, h):
        # Sets the element's height attribute.
        self.send('height="' + str(h) + '";')

    def Size(self, w, h):
        # Convenience: set width and height together.
        self.Width(w)
        self.Height(h)

    def appendChild(self, node):
        """Attach a child control; queued locally until this control is hosted."""
        if self._hosted:
            self.parent.Add(node)
            self.send('appendChild(document.getElementById("' + node.Id + '"));')
        else:
            self.Controls.append(node)

    def style(self, prop, style):
        # Set a single CSS property, e.g. style('color', 'red').
        self.send('style.' + prop + '="' + style + '";')

    # NOTE(review): ``add`` is a local function that is never called or
    # returned, so classList() currently does nothing — confirm intent.
    def classList(self):
        def add(self, classname):
            self.send('classList.add("' + classname + '");')
# Registry of bot configurations; each entry describes one Twitter Markov bot
# instance (account, source corpora, and its Twitter app credentials).
AVAILABLE = [
    {
        'account_name': 'ExampleTwitterMarkovBot',  # derived from https://twitter.com/ExampleTwitterMarkovBot
        'corpora': ('example_corpus1', 'example_corpus2'),  # text corpora the Markov chain is trained on
        'description': 'An example configuration for a bot instance',
        'twitter_key': '',  # twitter app API key
        'twitter_secret': ''  # twitter app API secret
    }
]
if __name__ == "__main__": with open("input.txt", "r") as f: input_list = f.readlines() x = 0 y = 0 for input in input_list: command, string_val = input.split(" ") val = int(string_val) if command == "forward": x += val elif command == "down": y += val elif command == "up": y -= val else: raise ValueError(f"{command} is not a valid command") print(x * y)
class TransactionError(Exception):
    """Generic failure raised while processing a transaction."""
    pass


class TransactionTimeoutError(Exception):
    """Raised when a transaction does not complete in time.

    NOTE(review): inherits ``Exception`` directly rather than
    ``TransactionError``, so handlers catching ``TransactionError`` will NOT
    catch timeouts — confirm this flat hierarchy is intentional.
    """
    pass


class TransactionFinished(Exception):
    """Signals that a transaction has already finished."""
    pass
class AdvancedArithmetic(object):
    """Abstract interface for divisor arithmetic."""

    def divisorSum(self, n):
        """Return the sum of all positive divisors of n; must be overridden.

        Fix over the original: the abstract method was missing the ``self``
        parameter, so its signature did not match the override below.
        """
        raise NotImplementedError


class Calculator(AdvancedArithmetic):
    def divisorSum(self, n):
        """Return the sum of all positive divisors of n, including 1 and n.

        E.g. divisorSum(12) == 1 + 2 + 3 + 4 + 6 + 12 == 28.
        """
        divisor_sum = 0
        for divisor in range(2, n):
            if n % divisor == 0:
                divisor_sum += divisor
        # Add n itself, plus the divisor 1 — except when n == 1, where n and 1
        # are the same divisor and must only be counted once.
        # Fix over the original: `n is 1` (identity comparison on an int,
        # a SyntaxWarning on modern Python) replaced with `n == 1`.
        return divisor_sum + n + (0 if n == 1 else 1)
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of resource bundle/importing rules."""

load(
    "@bazel_skylib//lib:partial.bzl",
    "partial",
)
load(
    "@build_bazel_rules_apple//apple:providers.bzl",
    "AppleResourceBundleInfo",
)
load(
    "@build_bazel_rules_apple//apple:utils.bzl",
    "group_files_by_directory",
)
load(
    "@build_bazel_rules_apple//apple/internal:resources.bzl",
    "NewAppleResourceInfo",
    "resources",
)

def _apple_bundle_import_impl(ctx):
    """Implementation of the apple_bundle_import rule."""

    # Validate that all imported files live under a single .bundle directory.
    bundle_groups = group_files_by_directory(
        ctx.files.bundle_imports,
        ["bundle"],
        attr = "bundle_imports",
    )
    if len(bundle_groups) != 1:
        fail(
            "There has to be exactly 1 imported bundle. Found:\n{}".format(
                "\n".join(bundle_groups.keys()),
            ),
        )

    # Bucketize the files with paths made relative to the .bundle directory.
    parent_dir_param = partial.make(
        resources.bundle_relative_parent_dir,
        extension = "bundle",
    )
    resource_provider = resources.bucketize(
        ctx.files.bundle_imports,
        parent_dir_param = parent_dir_param,
    )
    return [
        AppleResourceBundleInfo(),
        resource_provider,
    ]

apple_bundle_import = rule(
    implementation = _apple_bundle_import_impl,
    attrs = {
        "bundle_imports": attr.label_list(
            allow_empty = False,
            allow_files = True,
            mandatory = True,
            doc = """
The list of files under a .bundle directory to be propagated to the top-level
bundling target.
""",
        ),
    },
    doc = """
This rule encapsulates an already-built bundle. It is defined by a list of files
in exactly one .bundle directory.

apple_bundle_import targets need to be added to library targets through the
data attribute, or to other resource targets (i.e. apple_resource_bundle)
through the resources attribute.
""",
)

def _apple_resource_group_bundle_name(ctx):
    # (helper name reserved; intentionally not defined here)
    pass

def _apple_resource_bundle_impl(ctx):
    """Implementation of the apple_resource_bundle rule."""
    providers = []

    # Resources are nested under "<bundle_name>.bundle/" in the final bundle.
    bundle_name = "{}.bundle".format(ctx.attr.bundle_name or ctx.label.name)

    infoplists = resources.collect(ctx.attr, res_attrs = ["infoplists"])
    if infoplists:
        providers.append(
            resources.bucketize_typed(
                infoplists,
                "infoplists",
                parent_dir_param = bundle_name,
            ),
        )

    resource_files = resources.collect(ctx.attr, res_attrs = ["resources"])
    if resource_files:
        providers.append(
            resources.bucketize(
                resource_files,
                parent_dir_param = bundle_name,
            ),
        )

    if ctx.attr.structured_resources:
        # Avoid processing PNG files that are referenced through the structured_resources
        # attribute. This is mostly for legacy reasons and should get cleaned up in the future.
        providers.append(
            resources.bucketize(
                resources.collect(ctx.attr, res_attrs = ["structured_resources"]),
                parent_dir_param = partial.make(
                    resources.structured_resources_parent_dir,
                    parent_dir = bundle_name,
                ),
                avoid_buckets = ["pngs"],
            ),
        )

    # Find any targets added through resources which might propagate the NewAppleResourceInfo
    # provider, for example, apple_resource_bundle or apple_bundle_import targets.
    resource_providers = [
        x[NewAppleResourceInfo]
        for x in ctx.attr.resources
        if NewAppleResourceInfo in x
    ]
    if resource_providers:
        # Nest the dependencies' resources inside this target's bundle dir.
        resources_merged_provider = resources.merge_providers(resource_providers)
        providers.append(resources.nest_in_bundle(resources_merged_provider, bundle_name))

    return [
        AppleResourceBundleInfo(),
        resources.merge_providers(providers),
    ]

apple_resource_bundle = rule(
    implementation = _apple_resource_bundle_impl,
    attrs = {
        "bundle_name": attr.string(
            doc = """
The desired name of the bundle (without the `.bundle` extension). If this attribute is not set,
then the `name` of the target will be used instead.
""",
        ),
        "infoplists": attr.label_list(
            allow_empty = True,
            allow_files = True,
            doc = """
Infoplist files to be merged into the bundle's Info.plist. Duplicate keys between infoplist files will
cause an error if and only if the values conflict.
Bazel will perform variable substitution on the Info.plist file for the following values (if they
are strings in the top-level dict of the plist):

${BUNDLE_NAME}: This target's name and bundle suffix (.bundle or .app) in the form name.suffix.
${PRODUCT_NAME}: This target's name.
${TARGET_NAME}: This target's name.
The key in ${} may be suffixed with :rfc1034identifier (for example ${PRODUCT_NAME::rfc1034identifier})
in which case Bazel will replicate Xcode's behavior and replace non-RFC1034-compliant characters with -.
""",
        ),
        "resources": attr.label_list(
            allow_empty = True,
            allow_files = True,
            doc = """
Files to include in the resource bundle. Files that are processable resources, like .xib,
.storyboard, .strings, .png, and others, will be processed by the Apple bundling rules that have
those files as dependencies. Other file types that are not processed will be copied verbatim. These
files are placed in the root of the resource bundle (e.g. Payload/foo.app/bar.bundle/...) in most
cases. However, if they appear to be localized (i.e. are contained in a directory called *.lproj),
they will be placed in a directory of the same name in the app bundle.

You can also add other `apple_resource_bundle` and `apple_bundle_import` targets into `resources`,
and the resource bundle structures will be propagated into the final bundle.
""",
        ),
        "structured_resources": attr.label_list(
            allow_empty = True,
            allow_files = True,
            doc = """
Files to include in the final resource bundle. They are not processed or compiled in any way
besides the processing done by the rules that actually generate them. These files are placed in the
bundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in
res/foo.png inside the bundle.
""",
        ),
    },
    doc = """
This rule encapsulates a target which is provided to dependers as a bundle. An
apple_resource_bundle's resources are put in a resource bundle in the top
level Apple bundle dependent. apple_resource_bundle targets need to be added to
library targets through the data attribute.
""",
)

def _apple_resource_group_impl(ctx):
    """Implementation of the apple_resource_group rule."""
    resource_providers = []
    if ctx.attr.resources:
        resource_files = resources.collect(ctx.attr, res_attrs = ["resources"])
        if resource_files:
            resource_providers.append(
                resources.bucketize(resource_files),
            )
    if ctx.attr.structured_resources:
        # TODO(kaipi): Validate that structured_resources doesn't have processable resources,
        # e.g. we shouldn't accept xib files that should be compiled before bundling.
        structured_files = resources.collect(
            ctx.attr,
            res_attrs = ["structured_resources"],
        )

        # Avoid processing PNG files that are referenced through the structured_resources
        # attribute. This is mostly for legacy reasons and should get cleaned up in the future.
        resource_providers.append(
            resources.bucketize(
                structured_files,
                parent_dir_param = partial.make(
                    resources.structured_resources_parent_dir,
                ),
                avoid_buckets = ["pngs"],
            ),
        )

    # Find any targets added through resources which might propagate the NewAppleResourceInfo
    # provider, for example, other apple_resource_group and apple_resource_bundle targets.
    resource_providers.extend([
        x[NewAppleResourceInfo]
        for x in ctx.attr.resources
        if NewAppleResourceInfo in x
    ])
    if resource_providers:
        # If any providers were collected, merge them.
        return [resources.merge_providers(resource_providers)]
    return []

apple_resource_group = rule(
    implementation = _apple_resource_group_impl,
    attrs = {
        "resources": attr.label_list(
            allow_empty = True,
            allow_files = True,
            doc = """
Files to include in the final bundle that depends on this target. Files that are processable
resources, like .xib, .storyboard, .strings, .png, and others, will be processed by the Apple
bundling rules that have those files as dependencies. Other file types that are not processed will
be copied verbatim. These files are placed in the root of the final bundle (e.g.
Payload/foo.app/...) in most cases. However, if they appear to be localized (i.e. are contained in
a directory called *.lproj), they will be placed in a directory of the same name in the app bundle.

You can also add apple_resource_bundle and apple_bundle_import targets into `resources`, and the
resource bundle structures will be propagated into the final bundle.
""",
        ),
        "structured_resources": attr.label_list(
            allow_empty = True,
            allow_files = True,
            doc = """
Files to include in the final application bundle. They are not processed or compiled in any way
besides the processing done by the rules that actually generate them. These files are placed in the
bundle root in the same structure passed to this argument, so ["res/foo.png"] will end up in
res/foo.png inside the bundle.
""",
        ),
    },
    doc = """
This rule encapsulates a target which provides resources to dependents. An
apple_resource_group's resources are put in the top-level Apple bundle
dependent. apple_resource_group targets need to be added to library targets
through the data attribute.

If `apple_resource_bundle` or `apple_bundle_import` dependencies are added to
`resources`, the resource bundle structures are maintained at the final
top-level bundle.
""",
)
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Arguments may be given in either order; if b > a they are swapped first.
    Fix over the original: gcd(a, 0) now returns a (the mathematical
    convention) instead of raising ZeroDivisionError on `a % b`.
    """
    if b > a:
        return gcd(b, a)
    if b == 0:
        return a
    if a % b == 0:
        return b
    return gcd(b, a % b)
def data_loader(f_name, l_name):
    """Read f_name, deduplicate its lines, and pair them with label l_name.

    Returns (lines, labels) where `lines` is the file's unique lines (order
    unspecified, as the dedup goes through a set) and `labels` repeats
    l_name once per line.
    """
    with open(f_name, mode='r', encoding='utf-8') as handle:
        unique_lines = list(set(handle.readlines()))
    labels = [l_name] * len(unique_lines)
    return unique_lines, labels
# 2. Words Lengths
# Read strings separated by comma-and-space ", " and print each one with its
# length in the format: "{str} -> {len}, {str} -> {len}, ..."
words = input().split(', ')
print(', '.join(f"{word} -> {len(word)}" for word in words))
# Flattening queries for GA4 BigQuery export shards: each pulls one aspect of
# the nested `events_*` schema into a flat row set, keyed by a synthetic
# event_id (stream + pseudo user + event name + timestamp).

# TODO: maybe store them .sql files and read them as string
# example: https://cloud.google.com/blog/products/application-development/how-to-schedule-a-recurring-python-script-on-gcp
# def file_to_string(sql_path):
#     """Converts a SQL file holding a SQL query to a string.
#     Args:
#         sql_path: String containing a file path
#     Returns:
#         String representation of a file's contents
#     """
#     with open(sql_path, 'r') as sql_file:
#         return sql_file.read()

# One row per event: top-level and RECORD (non-repeated) fields flattened.
sample_events_query = """
SELECT
    CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,
    event_date AS event_date,
    event_timestamp AS event_timestamp,
    event_name AS event_name,
    event_previous_timestamp AS event_previous_timestamp,
    event_value_in_usd AS event_value_in_usd,
    event_bundle_sequence_id AS event_bundle_sequence_id,
    event_server_timestamp_offset AS event_server_timestamp_offset,
    user_id AS user_id,
    user_pseudo_id AS user_pseudo_id,
    privacy_info.analytics_storage AS privacy_info_analytics_storage,
    privacy_info.ads_storage AS privacy_info_ads_storage,
    privacy_info.uses_transient_token AS privacy_info_uses_transient_token,
    user_first_touch_timestamp AS user_first_touch_timestamp,
    user_ltv.revenue AS user_ltv_revenue,
    user_ltv.currency AS user_ltv_currency,
    device.category AS device_category,
    device.mobile_brand_name AS device_mobile_brand_name,
    device.mobile_model_name AS device_mobile_model_name,
    device.mobile_marketing_name AS device_mobile_marketing_name,
    device.mobile_os_hardware_model AS device_mobile_os_hardware_model,
    device.operating_system AS device_operating_system,
    device.operating_system_version AS device_operating_system_version,
    device.vendor_id AS device_vendor_id,
    device.advertising_id AS device_advertising_id,
    device.language AS device_language,
    device.is_limited_ad_tracking AS device_is_limited_ad_tracking,
    device.time_zone_offset_seconds AS device_time_zone_offset_seconds,
    device.browser AS device_browser,
    device.browser_version AS device_browser_version,
    device.web_info.browser AS device_web_info_browser,
    device.web_info.browser_version AS device_web_info_browser_version,
    device.web_info.hostname AS device_web_info_hostname,
    geo.continent AS geo_continent,
    geo.country AS geo_country,
    geo.region AS geo_region,
    geo.city AS geo_city,
    geo.sub_continent AS geo_sub_continent,
    geo.metro AS geo_metro,
    app_info.id AS app_info_id,
    app_info.version AS app_info_version,
    app_info.install_store AS app_info_install_store,
    app_info.firebase_app_id AS app_info_firebase_app_id,
    app_info.install_source AS app_info_install_source,
    traffic_source.name AS traffic_source_name,
    traffic_source.medium AS traffic_source_medium,
    traffic_source.source AS traffic_source_source,
    stream_id AS stream_id,
    platform AS platform,
    event_dimensions.hostname AS event_dimensions_hostname,
    ecommerce.total_item_quantity AS ecommerce_total_item_quantity,
    ecommerce.purchase_revenue_in_usd AS ecommerce_purchase_revenue_in_usd,
    ecommerce.purchase_revenue AS ecommerce_purchase_revenue,
    ecommerce.refund_value_in_usd AS ecommerce_refund_value_in_usd,
    ecommerce.refund_value AS ecommerce_refund_value,
    ecommerce.shipping_value_in_usd AS ecommerce_shipping_value_in_usd,
    ecommerce.shipping_value AS ecommerce_shipping_value,
    ecommerce.tax_value_in_usd AS ecommerce_tax_value_in_usd,
    ecommerce.tax_value AS ecommerce_tax_value,
    ecommerce.unique_items AS ecommerce_unique_items,
    ecommerce.transaction_id AS ecommerce_transaction_id
FROM `gcp-project.dataset.events_date_shard`
"""

# One row per (event, event_param): the typed value union is coalesced into a
# single string column via CONCAT of the IFNULL'd variants.
sample_event_params_query = """
SELECT
    CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,
    event_params.key as event_params_key,
    CONCAT(IFNULL(event_params.value.string_value, ''),
           IFNULL(CAST(event_params.value.int_value AS STRING), ''),
           IFNULL(CAST(event_params.value.float_value AS STRING), ''),
           IFNULL(CAST(event_params.value.double_value AS STRING), '')) AS event_params_value
FROM `gcp-project.dataset.events_date_shard`
,UNNEST (event_params) AS event_params
"""

# One row per (event, user_property), with the same typed-value coalescing.
sample_user_properties_query = """
SELECT
    CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,
    user_properties.key as user_properties_key ,
    CONCAT(IFNULL(user_properties.value.string_value, ''),
           IFNULL(CAST(user_properties.value.int_value AS STRING), ''),
           IFNULL(CAST(user_properties.value.float_value AS STRING), ''),
           IFNULL(CAST(user_properties.value.double_value AS STRING), '')) AS user_properties_value,
    user_properties.value.set_timestamp_micros as user_properties_value_set_timestamp_micros
FROM `gcp-project.dataset.events_date_shard`
,UNNEST (user_properties) AS user_properties
"""

# One row per (event, item) from the repeated `items` RECORD.
sample_items_query = """
SELECT
    CONCAT(stream_id, "_" , user_pseudo_id, "_" , event_name, "_" , event_timestamp) AS event_id,
    items.item_id AS items_item_id,
    items.item_name AS items_item_name,
    items.item_brand AS items_item_brand,
    items.item_variant AS items_item_variant,
    items.item_category AS items_item_category,
    items.item_category2 AS items_item_category2,
    items.item_category3 AS items_item_category3,
    items.item_category4 AS items_item_category4,
    items.item_category5 AS items_item_category5,
    items.price_in_usd AS items_price_in_usd,
    items.price AS items_price,
    items.quantity AS items_quantity,
    items.item_revenue_in_usd AS items_item_revenue_in_usd,
    items.item_revenue AS items_item_revenue,
    items.item_refund_in_usd AS items_item_refund_in_usd,
    items.item_refund AS items_item_refund,
    items.coupon AS items_coupon,
    items.affiliation AS items_affiliation,
    items.location_id AS items_location_id,
    items.item_list_id AS items_item_list_id,
    items.item_list_name AS items_item_list_name,
    items.item_list_index AS items_item_list_index,
    items.promotion_id AS items_promotion_id,
    items.promotion_name AS items_promotion_name,
    items.creative_name AS items_creative_name,
    items.creative_slot AS items_creative_slot
FROM `gcp-project.dataset.events_date_shard`
,UNNEST(items) AS items
"""
def in_order_traversal(node, visit_func):
    """Visit left subtree, then the node's data, then the right subtree."""
    if node is None:
        return
    in_order_traversal(node.left, visit_func)
    visit_func(node.data)
    in_order_traversal(node.right, visit_func)


def pre_order_traversal(node, visit_func):
    """Visit the node's data first, then the left and right subtrees."""
    if node is None:
        return
    visit_func(node.data)
    pre_order_traversal(node.left, visit_func)
    pre_order_traversal(node.right, visit_func)


def post_order_traversal(node, visit_func):
    """Visit the left and right subtrees first, then the node's data."""
    if node is None:
        return
    post_order_traversal(node.left, visit_func)
    post_order_traversal(node.right, visit_func)
    visit_func(node.data)
"""Ask for the user's birth year and the current year, compute the age
(day and month deliberately ignored), print it, and — when the age is 18 or
more — tell the user they can get a driver's licence."""

birth_year = int(input("Digite seu ano de nacimento: \n"))
current_year = int(input('Digite o ano atual: \n'))

age = current_year - birth_year
print('Voce tem {} anos de idade'.format(age))

if age >= 18:
    print("Ual vc ja pode tirar sua carteira com {} anos ".format(age))
#!/usr/bin/env python3

# Write a program that computes the GC fraction of a DNA sequence in a window
# Window size is 11 nt
# Output with 4 significant figures using whichever method you prefer
# Use no nested loops
# Describe the pros/cons of this algorith vs. nested loops

seq = 'ACGACGCAGGAGGAGAGTTTCAGAGATCACGAATACATCCATATTACCCAGAGAGAG'
w = 11

# Seed the running G+C count with the first window [0, w).
gc = 0
for i in range(0,w):
	if seq[i] == "G" or seq[i] == "C":
		gc += 1
print(0,seq[0:w],f'{gc/w:.4f}')

# Slide the window one base at a time: subtract the base leaving on the left,
# add the base entering on the right — no re-count of the whole window.
for i in range(1, len(seq)-(w-1)):
	prev_nt = seq[i-1]
	next_nt = seq[i+w-1]
	if prev_nt == 'G' or prev_nt == "C":
		gc -= 1
	if next_nt == 'G' or next_nt == "C":
		gc += 1
	print(i,seq[i:i+w],f'{gc/w:.4f}')

"""
The nested loop algorithm individually calculates the GC content for each
window. This takes a lot of computing power when each window has only a single
letter difference from the previous window. The separate loops algorithm save
computing power by sliding the window along the sequence and adding and/or
subtracting the Gs and Cs from the existing fraction as they enter and leave
the window. Separate loops are much faster for long windows.

The cons of using the separate loops algorithm is that it does not assess each
window separately and depends on the surrounding sequence to determine GC
content. If the windows are separated by more than 1 base pair, the algorithm
must be reprogrammed. The prev and next variables have to become sequences
instead of single nucleotides and the GC content of these prev and next
variables has to be calculated and deleted from the existing fraction to get
the right answer. This takes longer to re-program than the nested loop
algorithm. In the nested loop algorithm, the range in the outer loop can be
easily changed to index a different window from the sequence.
"""

"""
0 ACGACGCAGGA 0.6364
1 CGACGCAGGAG 0.7273
2 GACGCAGGAGG 0.7273
3 ACGCAGGAGGA 0.6364
4 CGCAGGAGGAG 0.7273
5 GCAGGAGGAGA 0.6364
6 CAGGAGGAGAG 0.6364
7 AGGAGGAGAGT 0.5455
8 GGAGGAGAGTT 0.5455
9 GAGGAGAGTTT 0.4545
10 AGGAGAGTTTC 0.4545
11 GGAGAGTTTCA 0.4545
12 GAGAGTTTCAG 0.4545
13 AGAGTTTCAGA 0.3636
14 GAGTTTCAGAG 0.4545
15 AGTTTCAGAGA 0.3636
16 GTTTCAGAGAT 0.3636
17 TTTCAGAGATC 0.3636
18 TTCAGAGATCA 0.3636
19 TCAGAGATCAC 0.4545
20 CAGAGATCACG 0.5455
21 AGAGATCACGA 0.4545
22 GAGATCACGAA 0.4545
23 AGATCACGAAT 0.3636
24 GATCACGAATA 0.3636
25 ATCACGAATAC 0.3636
26 TCACGAATACA 0.3636
27 CACGAATACAT 0.3636
28 ACGAATACATC 0.3636
29 CGAATACATCC 0.4545
30 GAATACATCCA 0.3636
31 AATACATCCAT 0.2727
32 ATACATCCATA 0.2727
33 TACATCCATAT 0.2727
34 ACATCCATATT 0.2727
35 CATCCATATTA 0.2727
36 ATCCATATTAC 0.2727
37 TCCATATTACC 0.3636
38 CCATATTACCC 0.4545
39 CATATTACCCA 0.3636
40 ATATTACCCAG 0.3636
41 TATTACCCAGA 0.3636
42 ATTACCCAGAG 0.4545
43 TTACCCAGAGA 0.4545
44 TACCCAGAGAG 0.5455
45 ACCCAGAGAGA 0.5455
46 CCCAGAGAGAG 0.6364
"""
class Feature:
    """A named, selectable field whose value is matched by a data-type pattern."""

    def __init__(self, name, selector, data_type, number_of_values, patterns):
        """Store the feature metadata.

        patterns: mapping of data_type -> pattern; the one for this feature's
        data_type is looked up here (KeyError if absent, as before).
        number_of_values: the literal 'single' means one value; any other
        value marks the feature as multi-valued.
        """
        self.name = name
        self.selector = selector
        self.pattern = patterns[data_type]
        self.multiple_values = (number_of_values != 'single')
# Read integers until the sentinel 999 is entered (the sentinel itself is not
# counted), then report how many were read and their sum.
total = count = 0
value = int(input('Digite um número: '))
while value != 999:
    total += value
    count += 1
    value = int(input('Digite um número: '))
print(f'Você digitou \033[32m{count}\033[m e soma entre eles é iqual a \033[33m{total}.')
## rolling mean & variance
class OnlineStats:
    """Running mean/variance accumulator (Welford-style single-pass update).

    Fix over the original: ``__repr__`` was missing its closing ``>``.
    """

    def __init__(self):
        self.reset()

    def reset(self) -> None:
        """Discard all observations and start over."""
        self.n = 0        # number of observations seen
        self.mean = 0.0   # running mean
        self.m2 = 0.0     # running sum of squared deviations from the mean

    def update(self, x: float) -> None:
        """Update stats for new observation."""
        self.n += 1
        new_mean = self.mean + (x - self.mean) / self.n
        # Incremental update of the squared-deviation sum (numerically stable).
        self.m2 += (x - self.mean) * (x - new_mean)
        self.mean = new_mean

    @property
    def var(self) -> float:
        """Sample variance (Bessel-corrected); 0.0 with fewer than 2 samples."""
        if self.n > 1:
            return self.m2 / (self.n - 1)
        else:
            return 0.0

    @property
    def precision(self) -> float:
        """Inverse of variance."""
        if self.n > 1:
            # Clamp m2 away from zero to avoid division by zero.
            return (self.n - 1) / max(self.m2, 1.0e-6)
        else:
            return 1.0

    @property
    def std(self) -> float:
        """Sample standard deviation."""
        return self.var ** 0.5

    def __repr__(self):
        # Bug fix: the original format string lacked the closing '>'.
        return f'<OnlineStats mean={self.mean} std={self.std}>'
def get_multiples(num=1, c=10):
    """Yield the first ``c`` multiples of ``num`` (num, 2*num, ..., c*num).

    Defined for positive ``num`` (the original's behavior for num <= 0 was
    itself inconsistent — see its own "what about negative multiples?" note).
    Fix over the original: computes each multiple directly instead of
    scanning every integer and testing divisibility (O(c) vs O(c*num)).
    """
    for k in range(1, c + 1):
        yield num * k


if __name__ == "__main__":
    # Demo code. Fix over the original: this used to run at module level, so
    # merely importing the module printed output and then crashed with the
    # deliberate StopIteration at the end; it is now guarded.
    multiples_two = get_multiples(2, 3)
    for value in multiples_two:
        print(value)

    default_multiples = get_multiples()
    multiples_5 = get_multiples(5, 6)
    collected = []
    for _ in range(6):
        collected.append(next(multiples_5))
    print(collected)

    # OR
    for _ in range(11):  # this results in a StopIteration (as by default c=10)
        print(next(default_multiples))
# Copyright 2010-2011, RTLCores. All rights reserved.
# http://rtlcores.com
# See LICENSE.txt

class CmdArgs(list):
    """A list of command arguments with an optional converter callable.

    Fixes over the original: the mutable default argument ``value=[]`` is
    replaced by the immutable ``()`` (list() accepts any iterable, so
    behavior is unchanged), and ``== None`` becomes the idiomatic ``is None``.
    """

    def __init__(self, value=(), cmd=None):
        list.__init__(self, value)
        self.cmd = cmd  # optional callable applied by conv()

    def conv(self):
        """Return ``self.cmd(self)`` when a converter was given, else self."""
        if self.cmd is None:
            return self
        return self.cmd(self)
def main() -> None:
    # Competitive-programming solution (stdin-driven).  Reads an undirected
    # graph with n vertices and m edges, then runs a multi-source BFS over
    # the 2^n vertex subsets: state (s, i) means "subset s, last-touched
    # vertex i", and a move toggles a neighbor j of i in s (s ^ (1 << j)).
    # min_length[s] ends up as the fewest moves needed to reach subset s
    # from some single-vertex start; the answer printed is the sum over all
    # subsets.  (Exact problem semantics inferred from structure —
    # NOTE(review): confirm against the original judge statement.)
    n, m = map(int, input().split())
    g = [[] for _ in range(n)]
    for _ in range(m):
        u, v = map(int, input().split())
        u -= 1  # convert 1-based input to 0-based indices
        v -= 1
        g[u].append(v)
        g[v].append(u)

    INF = 1 << 60
    # min_length[s]: BFS distance to subset s (bitmask over n vertices).
    min_length = [INF] * (1 << n)
    remain = 1 << n          # number of subsets not yet assigned a distance
    min_length[0] = 0        # empty subset is distance 0 by definition
    remain -= 1

    # bfs
    # added_to_que[s][i] marks state (subset s, vertex i) as enqueued once.
    added_to_que = [[False] * n for _ in range(1 << n)]
    for i in range(n):
        added_to_que[1 << i][i] = True
    current_length = 1
    # Sources: every single-vertex subset, reachable in one step.
    que = [(1 << i, i) for i in range(n)]
    while remain:
        new_que = []
        for s, i in que:
            # First time any state reaches subset s fixes its distance.
            if min_length[s] == INF:
                min_length[s] = current_length
                remain -= 1
            for j in g[i]:
                t = s ^ (1 << j)  # toggle neighbor j in the subset
                if added_to_que[t][j]:
                    continue
                added_to_que[t][j] = True
                new_que.append((t, j))
        current_length += 1
        que = new_que
    print(sum(min_length))


if __name__ == "__main__":
    main()
"""A rich command line interface for PyPI.""" __name__ = "pypi-command-line" __title__ = __name__ __license__ = "MIT" __version__ = "0.4.0" __author__ = "Arian Mollik Wasi" __github__ = "https://github.com/wasi-master/pypi-cli"
""" Name: MultiSURF.py Authors: Gediminas Bertasius and Ryan Urbanowicz - Written at Dartmouth College, Hanover, NH, USA Contact: ryan.j.urbanowicz@darmouth.edu Created: December 4, 2013 Modified: August 25,2014 Description: --------------------------------------------------------------------------------------------------------------------------------------------------------- ReBATE V1.0: includes stand-alone Python code to run any of the included/available Relief-Based algorithms designed for attribute filtering/ranking. These algorithms are a quick way to identify attributes in the dataset that may be most important to predicting some phenotypic endpoint. These scripts output an ordered set of attribute names, along with respective scores (uniquely determined by the particular algorithm selected). Certain algorithms require key run parameters to be specified. This code is largely based on the Relief-Based algorithms implemented in the Multifactor Dimensionality Reduction (MDR) software. However these implementations have been expanded to accomodate continuous attributes (and continuous attributes mixed with discrete attributes) as well as a continuous endpoint. This code also accomodates missing data points. Built into this code, is a strategy to automatically detect from the loaded data, these relevant characteristics. Copyright (C) 2013 Ryan Urbanowicz This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABLILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA --------------------------------------------------------------------------------------------------------------------------------------------------------- """ def Run_MultiSURF(data): """ Called to run the MultiSURF algorithm. #PARAM x- is a matrix containing the attributes of all instances in the dataset #PARAM y- is a matrix containing the class of a data instance """ x = [ row[0] for row in data.trainFormatted ] y = [ row[1] for row in data.trainFormatted ] print("Running MultiSURF Algorithm...") scores=MultiSURF(x,y,data) print("MultiSURF run complete.") return scores def MultiSURF(x,y,data): """ Controls major MultiSURF loops. """ ScoreList=[] for i in range(data.numAttributes): #initializing attributes' scores to 0 ScoreList.append(0) #Precompute distances between all unique instance pairs within the dataset. 
print("Precomputing Distance Array") distanceArray = calculateDistanceArray(x,data) print("Computed") same_class_bound = data.phenSD D=[] avg_distances=[] for i in range(data.numTrainInstances): dist_vector=[] dist_vector=get_individual_distances(i,data,distanceArray) avg_distances.append(get_average(dist_vector)) std_dev=get_std_dev(dist_vector,avg_distances[i]) D.append(std_dev/2.0) for k in range(data.numAttributes): #looping through attributes if data.attributeInfo[k][0]: #Continuous Attribute minA=data.attributeInfo[k][1][0] maxA=data.attributeInfo[k][1][1] count_hit_near=0 count_miss_near=0 count_hit_far=0 count_miss_far=0 diff_hit_near=0 #initializing the score to 0 diff_miss_near=0 diff_hit_far=0 diff_miss_far=0 for i in range(data.numTrainInstances): for j in range(i,data.numTrainInstances): if i!=j and x[i][k]!=data.labelMissingData and x[j][k]!=data.labelMissingData: locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d = distanceArray[locator[0]][locator[1]] if (d<avg_distances[i]-D[i]): #Near if data.discretePhenotype: #discrete endpoint if y[i]==y[j]: #Same Endpoint count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute (closer att scores for near same phen should yield larger att penalty) diff_hit_near-=(abs(x[i][k]-x[j][k])/(maxA-minA)) else:#Discrete diff_hit_near-=1 else: #Different Endpoint count_miss_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute (farther att scores for near diff phen should yield larger att bonus) diff_miss_near+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_miss_near+=1 else:#continuous endpoint if abs(y[i]-y[j])<same_class_bound: count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_near-=(abs(x[i][k]-x[j][k])/(maxA-minA)) else:#Discrete diff_hit_near-=1 else: count_miss_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous 
Attribute diff_miss_near+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_miss_near+=1 if (d>avg_distances[i]+D[i]): #Far if data.discretePhenotype: #discrete endpoint if y[i]==y[j]: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_far-=(abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_miss_far+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_miss_far+=1 else:#continuous endpoint if abs(y[i]-y[j])<same_class_bound: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_far-=(abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute diff_miss_far+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_miss_far+=1 hit_proportion=count_hit_near/float(count_hit_near+count_miss_near) miss_proportion=count_miss_near/float(count_hit_near+count_miss_near) diff=diff_hit_near*miss_proportion+diff_miss_near*hit_proportion #applying weighting scheme to balance the scores hit_proportion=count_hit_far/float(count_hit_far+count_miss_far) miss_proportion=count_miss_far/float(count_hit_far+count_miss_far) diff+=diff_hit_far*miss_proportion+diff_miss_far*hit_proportion #applying weighting scheme to balance the scores ScoreList[k]+=diff return ScoreList def multiClassMultiSURF(x,y,data): """ Controls major MultiSURF loops. """ ScoreList=[] for i in range(data.numAttributes): #initializing attributes' scores to 0 ScoreList.append(0) #Precompute distances between all unique instance pairs within the dataset. 
print("Precomputing Distance Array") distanceArray = calculateDistanceArray(x,data) print("Computed") #For MulitClass Array Only multiclass_map = None if data.discretePhenotype and len(data.phenotypeList) > 2: multiclass_map = makeMultiClassMap(y,data) D=[] avg_distances=[] for i in range(data.numTrainInstances): dist_vector=[] dist_vector=get_individual_distances(i,data,distanceArray) avg_distances.append(get_average(dist_vector)) std_dev=get_std_dev(dist_vector,avg_distances[i]) D.append(std_dev/2.0) for k in range(data.numAttributes): #looping through attributes if data.attributeInfo[k][0]: #Continuous Attribute minA=data.attributeInfo[k][1][0] maxA=data.attributeInfo[k][1][1] count_hit_near=0 count_miss_near=0 count_hit_far=0 count_miss_far=0 diff_hit_near=0 #initializing the score to 0 diff_miss_near=0 diff_hit_far=0 diff_miss_far=0 class_Store_near = makeClassPairMap(multiclass_map) class_Store_far = makeClassPairMap(multiclass_map) for i in range(data.numTrainInstances): for j in range(i,data.numTrainInstances): if i!=j and x[i][k]!=data.labelMissingData and x[j][k]!=data.labelMissingData: locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d = distanceArray[locator[0]][locator[1]] if (d<avg_distances[i]-D[i]): #Near if y[i]==y[j]: count_hit_near+=1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute diff_hit_near-=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete diff_hit_near-=1 else: count_miss_near+=1 locator = [y[i],y[j]] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) class_Store_near[tempString][0] += 1 if x[i][k]!=x[j][k]: if data.attributeInfo[k][0]: #Continuous Attribute class_Store_near[tempString][1]+=abs(x[i][k]-x[j][k])/(maxA-minA) else:#Discrete class_Store_near[tempString][1]+=1 if (d>avg_distances[i]+D[i]): #Far if y[i]==y[j]: count_hit_far+=1 if data.attributeInfo[k][0]: #Continuous Attribute 
diff_hit_far-=(1-abs(x[i][k]-x[j][k]))/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: diff_hit_far-=1 else: count_miss_far+=1 locator = [y[i],y[j]] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) class_Store_far[tempString][0] += 1 if data.attributeInfo[k][0]: #Continuous Attribute class_Store_far[tempString][1]+=abs(x[i][k]-x[j][k])/(maxA-minA) #Attribute being similar is more important. else:#Discrete if x[i][k]==x[j][k]: class_Store_far[tempString][1]+=1 #Near missSum = 0 for each in class_Store_near: missSum += class_Store_near[each][0] hit_proportion=count_hit_near/float(count_hit_near+count_miss_near) #Correcting for Missing Data. miss_proportion=count_miss_near/float(count_hit_near+count_miss_near) for each in class_Store_near: diff_miss_near += (class_Store_near[each][0]/float(missSum))*class_Store_near[each][1] diff_miss_near = diff_miss_near * float(len(class_Store_near)) diff = diff_miss_near*hit_proportion + diff_hit_near*miss_proportion #Far missSum = 0 for each in class_Store_far: missSum += class_Store_far[each][0] hit_proportion=count_hit_far/float(count_hit_far+count_miss_far) #Correcting for Missing Data. 
miss_proportion=count_miss_far/float(count_hit_far+count_miss_far) for each in class_Store_far: diff_miss_far += (class_Store_far[each][0]/float(missSum))*class_Store_far[each][1] diff_miss_far = diff_miss_far * float(len(class_Store_far)) diff += diff_miss_far*hit_proportion + diff_hit_far*miss_proportion ScoreList[k]+=diff return ScoreList def get_std_dev(dist_vector,avg): sum=0; for i in range(len(dist_vector)): sum+=(dist_vector[i]-avg)**2 sum=sum/float(len(dist_vector)) return (sum**0.5) def get_average(dist_vector): sum=0 for i in range(len(dist_vector)): sum+=dist_vector[i]; return sum/float(len(dist_vector)) def get_individual_distances(i,data,distanceArray): d=[] for j in range(data.numTrainInstances): if (i!=j): locator = [i,j] locator = sorted(locator, reverse=True) #Access corect half of table (result of removed table redundancy) d.append(distanceArray[locator[0]][locator[1]]) return d def calculateDistanceArray(x,data): #make empty distance array container (we will only fill up the non redundant half of the array distArray = [] for i in range(data.numTrainInstances): distArray.append([]) for j in range(data.numTrainInstances): distArray[i].append(None) for i in range(1, data.numTrainInstances): for j in range(0,i): distArray[i][j] = calculate_distance(x[i],x[j],data) return distArray def makeMultiClassMap(y, data): #finding number of classes in the dataset and storing them into the map multiclass_map={} for i in range(data.numTrainInstances): if (y[i] not in multiclass_map): multiclass_map[y[i]]=0 else: multiclass_map[y[i]]+=1 for each in data.phenotypeList: #For each class store probability of class occurrence in dataset. 
multiclass_map[each] = multiclass_map[each]/float(data.numTrainInstances) return multiclass_map def makeClassPairMap(multiclass_map): #finding number of classes in the dataset and storing them into the map classPair_map={} for each in multiclass_map: for other in multiclass_map: if each != other: locator = [each,other] locator = sorted(locator, reverse = True) tempString = str(locator[0])+str(locator[1]) if (tempString not in classPair_map): classPair_map[tempString] = [0,0] return classPair_map def calculate_distance(a,b,data): """ Calculates the distance between two instances in the dataset. Handles discrete and continuous attributes. Continuous attributes are accomodated by scaling the distance difference within the context of the observed attribute range. If a respective data point is missing from either instance, it is left out of the distance calculation. """ d=0 #distance for i in range(data.numAttributes): if a[i]!=data.labelMissingData and b[i]!=data.labelMissingData: if not data.attributeInfo[i][0]: #Discrete Attribute if a[i] != b[i]: d+=1 else: #Continuous Attribute min_bound=float(data.attributeInfo[i][1][0]) max_bound=float(data.attributeInfo[i][1][1]) d+=abs(float(a[i])-float(b[i]))/float(max_bound-min_bound) #Kira & Rendell, 1992 -handling continiuous attributes return d
# import libraries and modules


# provide a class storing the configuration of the back-end engine
class LXMconfig:
    """Configuration holder for the back-end engine.

    ``__init__`` installs defaults into ``self.lxm_conf``;
    ``getConfiguration`` overrides them from a ``key = value`` text file;
    ``postConfiguration`` prints the effective configuration when debugging.
    """

    # constructor of the class
    def __init__(self):
        # influxDB client handles, attached later by the caller
        self.idb_c1 = None
        self.idb_c2 = None
        self.idb_c3 = None
        self.lxm_conf = {}
        self.program_name = "lx_allocator"
        # defaults for every supported configuration option
        self.lxm_conf["header_request_id"] = 0
        self.lxm_conf["header_forwarded_for"] = 1
        self.lxm_conf["service_ip"] = "0.0.0.0"
        self.lxm_conf["service_port"] = 9000
        self.lxm_conf["influx_server"] = "0.0.0.0"
        self.lxm_conf["influx_port"] = 8086
        self.lxm_conf["debug"] = 1
        self.lxm_conf["hard_exit"] = True
        self.lxm_conf["hard_startup"] = True
        self.lxm_conf["lxm_db1"] = "lxm_ddi_performance"
        self.lxm_conf["lxm_db2"] = "lxm_allocation"
        self.lxm_conf["lxm_db3"] = "lxm_maintenance"
        self.lxm_conf["cleanup_maintenance"] = 1
        self.lxm_conf["backend_URL"] = None
        self.lxm_conf["keycloak_URL"] = None
        self.lxm_conf["KC_REALM"] = None
        self.lxm_conf["KC_CLID"] = None
        self.lxm_conf["KC_SECRET"] = None
        self.lxm_conf["heappe_middleware_available"] = 0
        self.lxm_conf["openstack_available"] = 0
        self.lxm_conf["hpc_centers"] = None
        self.lxm_conf["heappe_service_URLs"] = None
        self.lxm_conf["transfer_sizes"] = ""
        self.lxm_conf["transfer_speeds"] = ""

    # define configuration routines
    def getConfiguration(self, conf_path):
        """Parse ``conf_path`` into ``self.lxm_conf``.  Returns 0.

        Lines starting with '#', a newline or a tab are skipped.  Each other
        line is ``param = value``.
        """
        # ``with`` guarantees the file is closed even if parsing raises
        # (the original leaked the handle on error).
        with open(conf_path, "r") as conf:
            for line in conf:
                # the character '#' is used to put a line comment in the
                # configuration file
                if (line[0] == "#") or (line[0] == "\n") or (line[0] == "\t"):
                    continue
                # BUG FIX: split on the FIRST '=' only — values such as URLs
                # or secrets may themselves contain '=' and were truncated by
                # the original unbounded split("=").
                fields = line.split("=", 1)
                param = str(fields[0]).strip("\n ")
                value = str(fields[1]).strip("\n ")
                # parse the file and update the configuration dictionary
                if param == "influx_server":
                    self.lxm_conf["influx_server"] = value
                elif param == "influx_port":
                    self.lxm_conf["influx_port"] = int(value)
                elif param == "debug":
                    self.lxm_conf["debug"] = int(value)
                elif param == "service_ip":
                    self.lxm_conf["service_ip"] = value
                elif param == "service_port":
                    self.lxm_conf["service_port"] = int(value)
                elif param == "influx_db1":
                    self.lxm_conf["lxm_db1"] = value
                elif param == "influx_db2":
                    self.lxm_conf["lxm_db2"] = value
                elif param == "influx_db3":
                    self.lxm_conf["lxm_db3"] = value
                elif param == "cleanup_maintenance":
                    self.lxm_conf["cleanup_maintenance"] = int(value)
                elif param == "header_request_id":
                    self.lxm_conf["header_request_id"] = int(value)
                elif param == "header_forwarded_for":
                    self.lxm_conf["header_forwarded_for"] = int(value)
                elif param == "hard_exit":
                    if int(value) == 1:
                        self.lxm_conf["hard_exit"] = True
                    else:
                        self.lxm_conf["hard_exit"] = False
                elif param == "hard_startup":
                    if int(value) == 1:
                        self.lxm_conf["hard_startup"] = True
                    else:
                        self.lxm_conf["hard_startup"] = False
                elif param == "hpc_centers":
                    self.lxm_conf["hpc_centers"] = value
                elif param == "transfer_sizes":
                    self.lxm_conf["transfer_sizes"] = value
                elif param == "transfer_speeds":
                    self.lxm_conf["transfer_speeds"] = value
                elif param == "heappe_middleware_available":
                    self.lxm_conf["heappe_middleware_available"] = int(value)
                elif (
                    param == "heappe_service_URLs"
                    and self.lxm_conf["heappe_middleware_available"] == 1
                ):
                    # only meaningful when the middleware flag was set earlier
                    # in the file (order-dependent, as in the original)
                    self.lxm_conf["heappe_service_URLs"] = value
                elif param == "openstack_available":
                    self.lxm_conf["openstack_available"] = int(value)
                elif param == "backend_URL":
                    self.lxm_conf["backend_URL"] = value
                elif param == "keycloak_URL":
                    self.lxm_conf["keycloak_URL"] = value
                elif param == "KC_REALM":
                    self.lxm_conf["KC_REALM"] = value
                elif param == "KC_CLID":
                    self.lxm_conf["KC_CLID"] = value
                elif param == "KC_SECRET":
                    self.lxm_conf["KC_SECRET"] = value
                else:
                    print(" error - unrecognized option (%s)" % (param))
        return 0

    # print out the current configuration
    def postConfiguration(self):
        """Print the effective configuration when debug mode is on.  Returns 0."""
        if self.lxm_conf["debug"] == 1:
            print(" --------------------------------------------------------")
            print(" Backend Service Configuration:")
            print(" --------------------------------------------------------")
            if self.lxm_conf["hard_exit"]:
                print(" hard-exit mode     : True")
            else:
                print(" hard-exit mode     : False")
            if self.lxm_conf["hard_startup"]:
                print(" hard-startup mode  : True")
            else:
                print(" hard-startup mode  : False")
            print(" debug mode         : %d" % (self.lxm_conf["debug"]))
            print(" service address    : %s " % (self.lxm_conf["service_ip"]))
            print(" service port       : %-5d " % (self.lxm_conf["service_port"]))
            print(" influxDB address   : %s " % (self.lxm_conf["influx_server"]))
            print(" influxDB port      : %-5d " % (self.lxm_conf["influx_port"]))
            print(" db_client_1        : %s " % (self.lxm_conf["lxm_db1"]))
            print(" db_client_2        : %s " % (self.lxm_conf["lxm_db2"]))
            print(" db_client_3        : %s " % (self.lxm_conf["lxm_db3"]))
            print(" --------------------------------------------------------")
        # executing some action before serving the routes (initialization)
        if self.lxm_conf["debug"] == 1:
            print(" (dbg) the webapp is more verbose to support debugging")
        return 0
{ "targets": [ { "target_name": "priorityqueue_native", "sources": [ "src/priorityqueue_native.cpp", "src/ObjectHolder.cpp", "src/index.d.ts"], "cflags": ["-Wall", "-std=c++11"], 'xcode_settings': { 'OTHER_CFLAGS': [ '-std=c++11' ], }, "conditions": [ [ 'OS=="mac"', { "xcode_settings": { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11','-stdlib=libc++'], 'OTHER_LDFLAGS': ['-stdlib=libc++'], 'MACOSX_DEPLOYMENT_TARGET': '10.7' } } ] ] } ] }
class User:
    """
    User class for creating password locker account and logging in
    """

    # Shared registry of every saved account (class-level on purpose).
    user_credentials = []

    def __init__(self, fullname, username, password):
        self.fullname = fullname
        self.username = username
        self.password = password

    def save_user(self):
        """
        a funtion for saving user credentials after creating a account
        """
        User.user_credentials.append(self)

    @classmethod
    def verify_user(cls, user_name, user_password):
        """
        verify is the user has created an account and exists in the list.Returns a boolean value
        """
        # any() handles the empty-registry case and cannot fall into the
        # "return False after the first non-matching user" trap the original
        # loop/return structure invited.
        return any(
            user.username == user_name and user.password == user_password
            for user in cls.user_credentials
        )
"""Breadth-first search shortest path implementations.""" def bfs_shortest_path(graph, x, y): """Find shortest number of edges between nodes x and y. :x: a node :y: a node :Returns: shortest number of edges from node x to y or -1 if none exists """ if x == y: return 0 visited = [x] q = [x] # keep tab on distances from `x` dist = {x: 0, y: -1} while q: v = q.pop(0) if v == y: dist[y] = dist[v] if dist[y] == -1 else min(dist[y], dist[v]) for a in graph[v]: if a not in visited: visited.append(a) dist[a] = dist[v] + 1 q.append(a) return dist[y] def bfs_shortest_path_print(graph, x, y): """Return shortest path between nodes x and y.""" visited = [] q = [[x]] while q: path = q.pop(0) node = path[-1] if node not in visited: for adjacent in graph[node]: new_path = list(path) new_path.append(adjacent) if adjacent == y: return new_path q.append(new_path) visited.append(node) return f'No path from {x} to {y}.' if __name__ == '__main__': graph = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } print(bfs_shortest_path(graph, 'G', 'D')) print(bfs_shortest_path_print(graph, 'G', 'D'))
'''
'''
print('\nNested Function\n')


def function_1(text):
    """Demo: the inner function reads ``text`` from the enclosing scope and
    is invoked immediately — nothing escapes the call."""
    def function_2():
        print(text)
    function_2()


if __name__ == '__main__':
    function_1('Welcome')


print('\n closure Function \n')


def function_1(text):  # deliberate redefinition for the second demo
    """Demo: the inner function is *returned*, so it keeps ``text`` alive
    after this call finishes — a closure."""
    def function_2():
        print(text)
    return function_2


if __name__ == '__main__':
    myFunction = function_1('Thanks !')
    myFunction()
class Student:
    """A student record: first/last name, K-number, and a derived
    university e-mail address."""

    def __init__(self, first, last, kid):
        """Store the names and id, and build the @tamuk.edu e-mail."""
        self.fname = first
        self.lname = last
        self.kid = kid
        self.email = f'{first}.{last}@tamuk.edu'

    def firstname(self):
        """Return the stored first name."""
        return self.fname


stu_1 = Student('ashwitha', 'devireddy', 'k00442409')
stu_2 = Student('santosh', 'kesireddy', 'k00442410')

print(stu_1.email)
print(stu_2.email)
print(stu_1.firstname())
def get_initial(name):
    """Return the first character of *name* uppercased ('' stays '')."""
    # Slicing (rather than indexing) is safe on the empty string.
    return name[:1].upper()


# Collect the three name parts and echo the combined initials.
first_name = input('Enter your first name: ')
first_name_initial = get_initial(first_name)

middle_name = input('Enter your middle name: ')
middle_name_initial = get_initial(middle_name)

last_name = input('Enter your last name: ')
last_name_initial = get_initial(last_name)

print('You initials are: ' + first_name_initial + middle_name_initial + last_name_initial)
class Password:
    '''
    class of the password file: one (page, password) credential pair,
    plus a shared registry of every saved credential
    '''

    # Shared registry of saved credentials.
    # BUG FIX: this attribute was declared as ``user_password`` while every
    # method referenced ``Password.user_passwords`` — an AttributeError at
    # first use.  The name now matches the methods.
    user_passwords = []

    def __init__(self, page, password):
        self.page = page
        self.password = password

    def save_page(self):
        '''save password created by new user'''
        Password.user_passwords.append(self)

    def delete_page(self):
        '''deletes password created by new user'''
        Password.user_passwords.remove(self)

    @classmethod
    def display_page(cls):
        '''displays new user passwords generated'''
        return cls.user_passwords

    @classmethod
    def find_by_page(cls, pager):
        '''return the credential whose page matches ``pager`` (None if absent)'''
        for pagy in cls.user_passwords:
            if pagy.page == pager:
                return pagy
        return None

    @classmethod
    def page_exists(cls, pager):
        '''return the matching credential, or False when none exists'''
        for pagy in cls.user_passwords:
            if pagy.page == pager:
                return pagy
        return False
# Reescreva a função leiaInt() que fizemos no desafio 104, incluindo agora
# a possibilidade da digitação de um número de tipo inválido. Aproveite e crie
# também uma função leiaFloat() com a mesma funcionalidade.


# Funções
def _leia_numero(msg, converte, msg_erro):
    """Prompt with ``msg`` until ``converte(input())`` succeeds.

    Shared engine for leiaInt/leiaFloat: reprompts on invalid input,
    returns None when the user interrupts with Ctrl-C.
    """
    while True:
        try:
            valor = converte(input(msg))
        except (ValueError, TypeError):
            print(msg_erro)
        except KeyboardInterrupt:
            print('\033[1;31mERRO: entrada de dados interrompida!\033[0m')
            return None
        else:
            return valor


def leiaInt(msg):
    """Read an int from the user, reprompting until the input is valid."""
    return _leia_numero(
        msg, int, '\033[1;31mERRO: digite um número inteiro válido!\033[0m')


def leiaFloat(msg):
    """Read a float from the user, reprompting until the input is valid."""
    return _leia_numero(
        msg, float, '\033[1;31mERRO: digite um número válido!\033[0m')


# Bloco principal
inteiro = leiaInt('Digite um número inteiro: ')
decimal = leiaFloat('Digite um número decimal: ')
print(f'Número inteiro digitado: {inteiro}')
print(f'Número decimal digitado: {decimal}')
# This is the main settings file for package setup and PyPi deployment. # Sphinx configuration is in the docsrc folder # Main package name PACKAGE_NAME = 'dstream_excel' # Package version in the format (major, minor, release) PACKAGE_VERSION_TUPLE = (0, 4, 6) # Short description of the package PACKAGE_SHORT_DESCRIPTION = 'Automate data collection from Thompson Reuters Datastream ' \ 'using the excel plugin' # Long description of the package PACKAGE_DESCRIPTION = """ Use this tool to drive Excel using the Thompson Reuters Eikon plugin to download Datastream data. See more at the repo page: https://github.com/nickderobertis/datastream-excel-downloader-py """ # Author PACKAGE_AUTHOR = "Nick DeRobertis" # Author email PACKAGE_AUTHOR_EMAIL = 'whoopnip@gmail.com' # Name of license for package PACKAGE_LICENSE = 'MIT' # Classifications for the package, see common settings below PACKAGE_CLASSIFIERS = [ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Operating System :: Microsoft :: Windows', ] # Add any third party packages you use in requirements here PACKAGE_INSTALL_REQUIRES = [ # Include the names of the packages and any required versions in as strings # e.g. # 'package', # 'otherpackage>=1,<2' 'pypiwin32', 'pandas', 'numpy', 'openpyxl', 'xlrd', 'exceldriver', 'processfiles', 'xlwings', ] # Sphinx executes all the import statements as it generates the documentation. To avoid having to install all # the necessary packages, third-party packages can be passed to mock imports to just skip the import. 
# By default, everything in PACKAGE_INSTALL_REQUIRES will be passed as mock imports, along with anything here. # This variable is useful if a package includes multiple packages which need to be ignored. DOCS_OTHER_MOCK_IMPORTS = [ 'pythoncom', 'win32com', 'pywintypes', 'winreg' ] PACKAGE_URLS = { 'Code': 'https://github.com/nickderobertis/datastream-excel-downloader-py', 'Documentation': 'https://nickderobertis.github.io/datastream-excel-downloader-py/' }
# -*- coding: utf-8 -*-
# @Time: 2020/1/5 6:22 下午
# @Author: GraceKoo
# @File: 35_search-insert-position.py
# @Desc:https://leetcode-cn.com/problems/search-insert-position/
import bisect


class Solution:
    def searchInsert(self, nums, target: int) -> int:
        """Return the index of target in the sorted list nums, or the index
        at which it would be inserted to keep nums sorted (LeetCode 35).

        BUG FIX: the original linear scan returned -1 whenever
        target < nums[0] (expected 0) and scanned in O(n); bisect_left is
        exactly "first index i with nums[i] >= target", in O(log n).
        """
        return bisect.bisect_left(nums, target)


so = Solution()
print(so.searchInsert([1, 3, 5, 6], 7))
# CPF (Brazilian taxpayer id) check-digit validator: reads formatted CPFs
# from stdin, one per line (e.g. "123.456.789-09"), until EOF.
while True:
    try:
        e = str(input()).strip()
        # Strip the formatting so c holds only the 11 digits.
        c = e.replace('.', '').replace('-', '')
        s = 0
        # First verifier digit: weighted sum of digits 1..9 with weights 1..9.
        for i in range(9):
            s += int(c[i]) * (i + 1)
        b1 = s % 11
        b1 = 0 if b1 == 10 else b1  # remainder 10 maps to digit 0
        s = 0
        # e[-2] is the first check digit of the formatted input.
        if b1 == int(e[-2]):
            # Second verifier digit: same digits, weights 9..1 (descending).
            for i in range(9):
                s += int(c[i]) * (9 - i)
            b2 = s % 11
            b2 = 0 if b2 == 10 else b2
            if b2 == int(e[-1]):
                print('CPF valido')
            else:
                print('CPF invalido')
        else:
            print('CPF invalido')
    except EOFError:
        # End of input: stop reading.
        break
# Intersection of 2 arrays
class Solution:
    def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the multiset intersection of nums1 and nums2: each value
        appears as many times as it occurs in both lists (order of the
        result is unspecified, per the LeetCode 350 contract).

        The original duplicated the entire count-and-scan loop in both
        branches; swapping so we always Count the shorter list removes the
        duplication (O(min) extra space, O(n+m) time).
        """
        if len(nums1) > len(nums2):
            nums1, nums2 = nums2, nums1
        counter = collections.Counter(nums1)
        result = []
        for n in nums2:
            # Counter returns 0 (without inserting) for missing keys.
            if counter[n] > 0:
                counter[n] -= 1
                result.append(n)
        return result
class propertyDecorator(object):
    """Minimal demo of the @property / @x.setter pair: ``x`` reads and
    writes the non-public backing attribute."""

    def __init__(self, x):
        self._value = x

    @property
    def x(self):
        """Read access to the wrapped value."""
        return self._value

    @x.setter
    def x(self, new_value):
        """Write access to the wrapped value."""
        self._value = new_value


pd = propertyDecorator(100)
print(pd.x)
pd.x = 10
print(pd.x)
def test():
    # Exercise checker for a spaCy (Chinese) course step: verifies the
    # learner computed the similarity between two spans.  Relies on globals
    # injected by the course runtime — __solution__ (the learner's source),
    # span1, span2, similarity, and the __msg__ reporter.  TODO confirm the
    # injection contract against the course framework.
    #
    # The learner must have called similarity in either direction.
    assert (
        "span1.similarity(span2)" in __solution__
        or "span2.similarity(span1)" in __solution__
    ), "你有计算两个span之间的相似度吗?"
    # The two spans must cover the expected tokens.
    assert span1.text == "不错的餐厅", "你有正确生成span1吗?"
    assert span2.text == "很好的酒吧", "你有正确生成span2吗?"
    # Cosine similarity must be a float in [0, 1].
    assert (
        0 <= float(similarity) <= 1
    ), "相似度分数是一个浮点数。你确定你计算正确了吗?"

    __msg__.good(
        "做得好!如果愿意的话你可以随便再做些比对其它实例的实验。"
        "这些相似度并不*一定*是绝对正确的。一旦你要开始认真开发一些自然语言处理"
        "的应用并且用到语义相似度的话,你可能需要在自己的数据上先训练词向量,或者"
        "再去改进一下相似度的算法。"
    )
countries = ["USA", "Spain", "France", "Canada"] for country in countries: print(country) data = "Hello from python" for out in data: print(out) for numero in range(8): print(numero)
# MIDI meta-event descriptor table.
#
# (A large commented-out draft of this table, keyed by hex strings such as
# "0xFF"/"0x58", used to sit here; it duplicated the dict below and has been
# removed.  The integer-keyed dict is the single source of truth.)
#
# X maps a status byte (255 = meta event) to per-event descriptors keyed by
# the meta type id.  Each descriptor records:
#   type_id / type_name : numeric id and human-readable name of the event
#   length              : payload byte count (-1 = variable length)
#   params              : payload field names ('text' for string payloads,
#                         None when there is no payload)
#   dtype               : Python type the payload decodes to
#   mask                : per-byte bit mask(s) applied when reading the payload
#   default             : default payload value(s)
X = {
    255: {
        0: {'dtype': 'int', 'length': 2, 'mask': (255, 255),
            'params': ('nmsb', 'nlsb'),
            'type_id': 0, 'type_name': 'sequence_number',
            "default": [0, 0]},
        1: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 1, 'type_name': 'text_event',
            "default": "Enter the Text"},
        2: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 2, 'type_name': 'copyright_notice',
            "default": "No Copyright"},
        3: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 3, 'type_name': 'track_name',
            "default": "Track 0"},
        4: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 4, 'type_name': 'instrument_name',
            "default": "Piano"},
        5: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 5, 'type_name': 'lyrics',
            "default": "LYRICS"},
        6: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 6, 'type_name': 'marker',
            "default": "*"},
        7: {'dtype': 'str', 'length': -1, 'mask': 127, 'params': 'text',
            'type_id': 7, 'type_name': 'cue_point',
            "default": "-"},
        32: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('channel',),
             'type_id': 32, 'type_name': 'midi_ch_prefix',
             "default": [0]},
        33: {'dtype': 'int', 'length': 1, 'mask': (15,),
             'params': ('port_no',),
             'type_id': 33, 'type_name': 'midi_port',
             "default": [0]},
        47: {'dtype': 'None', 'length': 0, 'mask': 127, 'params': None,
             'type_id': 47, 'type_name': 'end_of_track',
             "default": None},
        81: {'dtype': 'int', 'length': 3, 'mask': (127, 127, 127),
             'params': ('musec_per_quat_note', 'musec_per_quat_note',
                        'musec_per_quat_note'),
             'type_id': 81, 'type_name': 'set_tempo',
             "default": [0, 0, 0]},
        84: {'dtype': 'int', 'length': 5, 'mask': (24, 60, 60, 30, 100),
             'params': ('hr', 'min', 'sec', 'fr', 'subfr'),
             'type_id': 84, 'type_name': 'smpte_offset',
             "default": [0, 0, 0, 0, 0]},
        # BUG FIX: this entry was missing its 'params' key (every other
        # payload-carrying entry has one); restored from the spec draft.
        88: {'dtype': 'int', 'length': 4, 'mask': (255, 255, 255, 255),
             'params': ('numer', 'denom', 'metro', '32nds'),
             'type_id': 88, 'type_name': 'time_sig',
             "default": [0, 0, 0, 0]},
        # BUG FIX: default was [0, 0]; the spec draft declared [0, 1]
        # (scale defaults to 1).
        89: {'dtype': 'int', 'length': 2, 'mask': (15, 1),
             'params': ('key', 'scale'),
             'type_id': 89, 'type_name': 'key_sig',
             "default": [0, 1]},
        127: {'dtype': 'any', 'length': -1, 'mask': 127, 'params': 'text',
              'type_id': 127, 'type_name': 'sequence_specifier',
              "default": [0, ]},
        'id': 255,
        'name': 'meta'
    }
}
def make_car(car_manufacturer, car_model, **other_attributes):
    """Build a dict describing a car.

    The two positional arguments become the 'car_manufacturer' and
    'car_model' keys; any extra keyword arguments are included verbatim
    as additional attributes.

    Fix: the kwargs parameter was named ``Other_attributes``, violating
    PEP 8 (function parameters are snake_case).  Renaming it is safe:
    a ``**`` catch-all name is never visible to callers.
    """
    car_info = dict(other_attributes)
    car_info['car_model'] = car_model
    car_info['car_manufacturer'] = car_manufacturer
    return car_info


# Demo: build and display one car description.
print(make_car('Toyota', 'Rav 4', color='blue'))
# -*- coding: utf-8 -*- """ Created on Fri Feb 22 16:21:21 2019 @author: x """
def find_largest_palindrome(range_start: int, range_end: int) -> int:
    """Return the largest palindromic product i*j with i, j drawn from
    [range_start, range_end) — range_end exclusive, like range().

    Returns 0 when no palindromic product exists in the range.
    """
    largest: int = 0
    product: int
    i: int
    j: int
    for i in range(range_start, range_end):
        # Start j at i: products are symmetric (i*j == j*i), so the lower
        # triangle would only repeat work.
        for j in range(i, range_end):
            product = i * j
            # Cheap bound check first; run the palindrome test only when
            # the product could actually become the new maximum.
            if product > largest and is_palindrome(str(product)):
                largest = product
    return largest


def is_palindrome(input_string: str) -> bool:
    """Return True if input_string reads the same forwards and backwards.

    Fix: the original fell through to ``return False`` for the empty
    string; the empty string is a palindrome, so it now returns True.
    """
    input_length: int = len(input_string)
    index: int
    # Compare mirrored character pairs; the middle character (odd length)
    # needs no check.
    for index in range(input_length // 2):
        if input_string[index] != input_string[input_length - index - 1]:
            return False
    return True
# SPDX-License-Identifier: MIT
# Copyright (c) 2020 Akumatic
#
# https://adventofcode.com/2020/day/18

def read_file() -> list:
    """Return the puzzle input (input.txt next to this script) as a list
    of stripped lines."""
    import os
    # BUG FIX: this previously built the path with
    # f"{__file__.rstrip('code.py')}input.txt".  str.rstrip strips a
    # *character set*, not a suffix, so it could also eat trailing
    # 'c/o/d/e/p/y/.' characters from the directory name.
    path = os.path.join(os.path.dirname(__file__), "input.txt")
    with open(path, "r") as f:
        return [line.strip() for line in f.readlines()]

def evaluate(values: list, operators: list, precedence: bool) -> int:
    """Reduce a flat, bracket-free expression to an int.

    values holds len(operators) + 1 operands; operators[i] sits between
    values[i] and values[i+1].  With precedence=False, "+" and "*" share
    one precedence level and apply left to right; with precedence=True,
    every "+" is applied before any "*".
    """
    if not precedence: # "+" and "*" have same precedence levels
        result = int(values[0])
        for i in range(len(operators)):
            if operators[i] == "+":
                result += int(values[i+1])
            else: # operators[i] == "*"
                result *= int(values[i+1])
    else: # "+" and "*" have different precedence levels; "+" evaluated before "*"
        # Collapse each "+" in place until none remain ...
        while True:
            try:
                idx = operators.index("+")
                values = values[:idx] + [values[idx] + values[idx+1]] + values[idx+2:]
                operators = operators[:idx] + operators[idx+1:]
            except ValueError:
                break
        # ... then only "*" operators are left: multiply all operands.
        result = 1
        for factor in values:
            result *= factor
    return result

def parse(expression: str, precedence: bool = False) -> int:
    """Evaluate an expression of non-negative ints, "+", "*" and parentheses.

    precedence=False gives plain left-to-right evaluation (part 1);
    precedence=True evaluates "+" before "*" (part 2).  Parenthesised
    sub-expressions are parsed recursively and treated as single values.
    """
    expression = expression.replace(" ", "")
    values = list()
    operators = list()
    i = 0
    while i < len(expression):
        if expression[i] == "+":
            operators.append("+")
            i += 1
        elif expression[i] == "*":
            operators.append("*")
            i += 1
        elif expression[i] == "(": # find correct closing bracket
            layer = 1
            j = i + 1
            while j < len(expression):
                if expression[j] == "(":
                    layer += 1
                elif expression[j] == ")":
                    if layer == 1:
                        break
                    layer -= 1
                j += 1
            # evaluate expression between brackets recursively
            values.append(parse(expression[i+1:j], precedence))
            i += j - i + 1  # skip past the closing bracket
        else: # numbers: accumulate consecutive digits into one int
            j = i
            value = 0
            while j < len(expression) and expression[j].isnumeric():
                value = value * 10 + int(expression[j])
                j += 1
            values.append(value)
            i += j - i
    return evaluate(values, operators, precedence)

def part1(input: list) -> int:
    """Sum of all lines evaluated left to right (equal precedence)."""
    return sum([parse(line) for line in input])

def part2(input: list) -> int:
    """Sum of all lines evaluated with "+" before "*"."""
    return sum([parse(line, precedence=True) for line in input])

if __name__ == "__main__":
    # Renamed from `input` to avoid shadowing the builtin.
    puzzle_input = read_file()
    print(f"Part 1: {part1(puzzle_input)}")
    print(f"Part 2: {part2(puzzle_input)}")
class AuthenticationError(Exception):
    """Error signalling an authentication failure."""


class MarketClosedError(Exception):
    """Error signalling that the market is closed."""


class MarketEmptyError(Exception):
    """Error signalling that the market is empty."""


class InternalStateBotError(Exception):
    """Error signalling an invalid internal bot state."""