content
stringlengths
7
1.05M
""" This module demonstrates OVERLOADING the + symbol: -- With numbers as operands, it means addition (as in arithmetic) -- With sequences as operands, it means concatenation, that is, forming a new sequence that stitches together its operands. This module also demonstrates the STR function. Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays, Amanda Stouder, Aaron Wilkin, and their colleagues. """ # ----------------------------------------------------------------------------- # Students: Read and run this program. There is nothing else # for you to do in here. Just use it as an example. # Before you leave this example, # *** MAKE SURE YOU UNDERSTAND: *** # *** -- What it means to use + for CONCATENATION *** # *** -- What the str function does. *** # ----------------------------------------------------------------------------- def main(): """ Demonstrates OVERLOADING the + symbol. """ # ------------------------------------------------------------------------- # First example below: computes 5 + 33 (addition, as in arithmetic) # Second example below: stitches together the two lists. # Third example below: stitches together the three tuples. # Fourth example below: stitches together the four strings. # Fifth example: contrasts concatenation with addition. # ------------------------------------------------------------------------- print() print('-----------------------------------------------------------') print('Addition, then various forms of concatenation:') print('-----------------------------------------------------------') print(5 + 33) print([4, 3] + [1, 7, 2, 4]) print((4, 1, 7) + (444,) + (3, 3)) print('hello' + 'Dave' + '55' + '83') print(5 + 33, '5' + '33') # ------------------------------------------------------------------------- # The str function and the concatenation form of the + operator # are handy for making strings from sub-strings. 
For example: # ------------------------------------------------------------------------- x = 51 y = 3 z = 40 print() print('-----------------------------------------------------------') print('With and (using string concatenation) without spaces:') print('-----------------------------------------------------------') # ------------------------------------------------------------------------- # Printing multiple items puts spaces between the items. # That is usually what you want. # ------------------------------------------------------------------------- print(x, y, z) # ------------------------------------------------------------------------- # But if you don't want spaces # (or want to otherwise format the string result): # ------------------------------------------------------------------------- x = [] for k in range(5): x = x + [(2 * k)] print(x)
""" 输入一个正整数判断它是不是素数 """ num = int(input('请输入一个正整数: ')) end = int(num ** 0.5) + 1 is_prime = True for x in range(2, end): if num % x == 0: is_prime = False break if is_prime and num != 1: print(f'{num}是素数') else: print(f'{num}不是素数')
filename = 'full_text_small.txt'


def file_write(filename):
    """Print the first five lines of *filename*.

    NOTE: despite its name, this function only READS the file.  The whole
    file is consumed; the last line read is returned, or None for an empty
    file (the original raised NameError in that case).
    """
    last = None
    with open(filename, 'r') as f:
        for n, last in enumerate(f, start=1):
            if n <= 5:
                # print() appends its own newline after the line's own,
                # matching the original double-spaced output.
                print(last)
    return last


if __name__ == '__main__':
    # Guarded so importing this module does not require the data file.
    file_write(filename)
# Bitwise operators on the bits 1 and 0: AND, OR, XOR.
a = 1
b = 0
c = a & b  # 0
d = a | b  # 1
e = a ^ b  # 1
print(c + d + e)

# A list of identical rows built with a comprehension; index row 1, column 0.
my_list = [[1, 2, 3, 4] for i in range(2)]
print(my_list[1][0])

# The comparison result (a bool) overwrites the int previously in x.
x = 2
x = x == x
print(x)

# insert() while walking the original index range: each pass reads the
# element now sitting at index v (always 1, because every insert shifts
# the tail right) and pushes it into position 1.
my_list = [1, 2, 3]
for v in range(len(my_list)):
    my_list.insert(1, my_list[v])
print(my_list)
# Read n pairs (l, c); accumulate c for every pair where l exceeds c.
n = int(input())
total = 0
for _ in range(n):
    l, c = map(int, input().split())
    if l > c:
        total += c
print(total)
# mmdetection-style config: Faster R-CNN with an R50-FPN backbone
# (MoCo-pretrained model file), trained on PASCAL VOC 07+12 with the
# standard 1x schedule and default runtime settings.
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn_moco.py',
    '../_base_/datasets/vocdataset_voc0712.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]

# SGD with the default detection lr (0.02) scaled down by 16 — presumably
# to match a smaller total batch size; TODO confirm against the schedule.
optimizer = dict(type='SGD', lr=0.02/16, momentum=0.9, weight_decay=0.0001)
# Extends the MSWin-PAR small base config (patch4, 512x512 crops, 160k
# iters on ADE20K, 224x224 ImageNet-1K pretraining).
_base_ = ['./mswin_par_small_patch4_512x512_160k_ade20k_pretrain_224x224_1K.py']

model = dict(
    decode_head=dict(
        # Switch the decode head to 'seq' mode — semantics defined by the
        # decode head implementation; TODO confirm.
        mode='seq',
    ))

# Per-GPU batch size override.
data = dict(samples_per_gpu=10)
def merge_sort(arr):
    """Sort *arr* in place via top-down merge sort and return it.

    Handles the empty list and single-element list (the n >= 2 guard is the
    recursion base case).  Stable: ties keep their left-half element first
    (the <= comparison).
    """
    n = len(arr)
    if n >= 2:
        mid = n // 2  # integer floor division instead of int(n/2)
        left = merge_sort(arr[:mid])
        right = merge_sort(arr[mid:])
        i = j = 0
        for k in range(n):
            # Take from the left half while it has elements and its head is
            # not larger than the right half's head (or the right is spent).
            if i < len(left) and (j == len(right) or left[i] <= right[j]):
                arr[k] = left[i]
                i += 1
            else:
                arr[k] = right[j]
                j += 1
    return arr


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin.
    arr = [int(num) for num in input().split()]
    print(merge_sort(arr))
# Exercises on loops and the conditional operator
# -----------------------------------------------

# Task 1: print five numbered lines of zeros.
for i in range(1, 6):
    print(i, ' 0')

# Task 2: the user enters 10 numbers; count how many fives were entered.
count_fives = 0
for i in range(10):
    a = int(input('Введите число '))
    if a == 5:
        count_fives += 1
print('Количество введенных цифр 5 = ', count_fives)

# Task 3: sum of the numbers 1..100.
abc = 0
for i in range(1, 101):
    abc += i
print('Сумма чисел от 1 до 100 = ', abc)

# Task 4: product of the numbers 1..10.
p = 1
for i in range(1, 11):
    p *= i
print('Произведение числе от 1 до 10 = ', p)

# Task 5: print the digits of a number, one per line (least significant first).
a = int(input('Введите целое число '))
while a != 0:
    print(a % 10)
    a = a // 10

# Task 6: sum of the digits of a number.
# (Renamed from `sum` so the builtin is no longer shadowed.)
a = int(input('Введите целое число '))
digit_sum = 0
while a != 0:
    digit_sum += (a % 10)
    a = a // 10
print('Сумма цифр числа = ', digit_sum)

# Task 7: product of the digits of a number.
a = int(input('Введите целое число '))
pr = 1
while a != 0:
    pr *= (a % 10)
    a = a // 10
print('Произведение цифр числа = ', pr)

# Task 8: does the number contain the digit 5?
# The while-else branch runs only when the loop finishes without `break`.
a = int(input('Введите целое число '))
while a > 0:
    if a % 10 == 5:
        print('Есть 5')
        break
    a = a // 10
else:
    print('Нет пятерок')

# Task 9: largest digit of a number.
# (Renamed from `max` so the builtin is no longer shadowed.)
a = int(input('Введите целое число '))
max_digit = 0
while a != 0:
    if max_digit < a % 10:
        max_digit = a % 10
    a = a // 10
print('Максимальная цифра в числе - ', max_digit)

# Task 10: how many fives are in the number.
a = int(input('Введите целое число '))
b = 0
while a > 0:
    if a % 10 == 5:
        b += 1
    a = a // 10
print('Количество цифр 5 в числе = ', b)
# --------------
# Lists: combine two rosters, then add and drop one name.
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']

new_class = class_1 + class_2
print(new_class)

new_class.append('Peter Warden')
print(new_class)

new_class.remove('Carla Gentry')
print(new_class)
# --------------

# --------------
# Dictionaries: course marks, their total and the percentage out of 500.
courses = {
    'Math': 65,
    'English': 70,
    'History': 80,
    'French': 70,
    'Science': 60,
}
for subject in ('Math', 'English', 'History', 'French', 'Science'):
    print(courses.get(subject))

total = sum(courses.values())
print(total)

percentage = (total / 500) * 100
print(percentage)
# --------------

# --------------
# Find the top scorer in mathematics (key with the maximum value).
mathematics = {
    'Geoffery Hinton': 78,
    'Andrew Ng': 95,
    'Sebastian Raschka': 65,
    'Yoshua Benjio': 50,
    'Hilary Mason': 70,
    'Corinna Cortes': 66,
    'Peter Warden': 75,
}
topper = max(mathematics, key=mathematics.get)
print(topper)
# --------------

# Build "LASTNAME FIRSTNAME" certificate text from a lowercase full name.
topper = 'andrew ng'
first_name, last_name = topper.split()
full_name = last_name + ' ' + first_name
certificate_name = full_name.upper()
print(certificate_name)
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

def f_gold(x, y, z):
    """Pick one of x, y, z based on the truthiness of pairwise quotients.

    A quotient y/x is falsy only when it equals 0 (i.e. the numerator is 0),
    so for nonzero inputs this effectively returns z.
    """
    if not (y / x):
        return y if not (y / z) else z
    return x if not (x / z) else z

#TOFILL

if __name__ == '__main__':
    param = [
        (48, 63, 56,),
        (11, 55, 84,),
        (50, 89, 96,),
        (21, 71, 74,),
        (94, 39, 42,),
        (22, 44, 86,),
        (3, 41, 68,),
        (67, 62, 94,),
        (59, 2, 83,),
        (50, 11, 1,),
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
# Hyperparameter configuration for a DDTP (difference target propagation)
# convolutional network on CIFAR-10.  Tuple-valued entries ('lr', 'epsilon')
# presumably hold one value per layer group — TODO confirm against the
# training code that consumes this dict.
config = {
    'lr': (1.5395901937079718e-05, 4.252664987376195e-05,
           9.011700881717918e-05, 0.00026653695086486183),
    'target_stepsize': 0.07688144983085089,
    'feedback_wd': 5.751527315358352e-07,
    'beta1': 0.9,
    'beta2': 0.999,
    'epsilon': (7.952762675272583e-06, 3.573159556208438e-06,
                1.0425400798717413e-08, 2.0232644009531115e-08),
    'lr_fb': 4.142073343374983e-05,
    'sigma': 0.18197929046014408,
    'beta1_fb': 0.9,
    'beta2_fb': 0.999,
    'epsilon_fb': 8.070760899188774e-06,
    'out_dir': 'logs/cifar/DDTPConvCIFAR',
    'network_type': 'DDTPConvCIFAR',
    'initialization': 'xavier_normal',
    'fb_activation': 'linear',
    'dataset': 'cifar10',
    #
    ### Training options ###
    'optimizer': 'Adam',
    'optimizer_fb': 'Adam',
    'momentum': 0.,
    'parallel': True,
    'normalize_lr': True,
    'batch_size': 128,
    'epochs_fb': 10,
    'not_randomized': True,
    'not_randomized_fb': True,
    'extra_fb_minibatches': 0,
    'extra_fb_epochs': 1,
    'epochs': 300,
    'double_precision': True,
    'no_val_set': True,
    'forward_wd': 0.,
    ### Network options ###
    # 'num_hidden': 3,
    # 'size_hidden': 1024,
    # 'size_input': 3072,
    # 'size_output': 10,
    'hidden_activation': 'tanh',
    'output_activation': 'softmax',
    'no_bias': False,
    ### Miscellaneous options ###
    'no_cuda': False,
    'random_seed': 42,
    'cuda_deterministic': False,
    'freeze_BPlayers': False,
    'multiple_hpsearch': False,
    ### Logging options ###
    'save_logs': False,
    'save_BP_angle': False,
    'save_GN_angle': False,
    'save_GN_activations_angle': False,
    'save_BP_activations_angle': False,
    'gn_damping': 0.
}
# constants related to the matchers # all the types of matches MATCH_TYPE_NONE = 0 MATCH_TYPE_RESET = 1 MATCH_TYPE_NMI = 2 MATCH_TYPE_WAIT_START = 3 MATCH_TYPE_WAIT_END = 4 MATCH_TYPE_BITS = 6 # number of bits required to represent the above (max 8) NUM_MATCHERS = 32 # how many match engines are there? MATCHER_BITS = 5 # number of bits required to represent the above (max 8)
def get_initial(name, force_uppercase=True):
    """Return the first character of *name*, upper- or lower-cased.

    The [0:1] slice never raises, so an empty name yields ''.
    """
    initial = name[0:1]
    return initial.upper() if force_uppercase else initial.lower()


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin.
    first_name = input('Enter your first name: ')
    # Keyword arguments may be passed in any order.
    initial = get_initial(force_uppercase=False, name=first_name)
    print('Your initial is: ' + initial)
INPUT_PATH = "./input.txt" input_file = open(INPUT_PATH, "r") lines = input_file.readlines() input_file.close() divided_input = [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in lines] # Part 1 print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in divided_input])) # Cursed part 1 1-liner assuming INPUT_PATH is defined with path to the input file #print("Part 1: ", sum([len([x for x in entry[1] if len(x) in [2, 3, 4, 7]]) for entry in [[[set(x) for x in x.split()] for x in line.split(" | ")] for line in open(INPUT_PATH, "r").readlines()]])) # Part 2 total_sum = 0 for elem in divided_input: left = elem[0] right = elem[1] numbers = { "0": None, "1": [x for x in left if len(x) == 2][0], "2": None, "3": None, "4": [x for x in left if len(x) == 4][0], "5": None, "6": None, "7": [x for x in left if len(x) == 3][0], "8": [x for x in left if len(x) == 7][0], "9": None } segments = { "a": None, "b": None, "c": None, "d": None, "e": None, "f": None, "g": None } (segments["a"],) = numbers["7"].difference(numbers["1"]) numbers["6"] = [x for x in left if len(x) == 6 and len(x.intersection(numbers["1"])) == 1][0] zero_and_nine = [x for x in left if len(x) == 6 and len(x.difference(numbers["6"])) != 0] numbers["9"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 4][0] numbers["0"] = [x for x in zero_and_nine if len(x.intersection(numbers["4"])) == 3][0] (segments["f"],) = numbers["6"].intersection(numbers["1"]) (segments["c"],) = numbers["1"].difference(set(segments["f"])) (segments["e"],) = numbers["6"].difference(numbers["9"]) (segments["d"],) = numbers["8"].difference(numbers["0"]) (segments["b"],) = numbers["4"].difference(set([x for x in segments.values() if x is not None])) (segments["g"],) = numbers["8"].difference(set([x for x in segments.values() if x is not None])) numbers["2"] = set([segments["a"], segments["c"], segments["d"], segments["e"], segments["g"]]) numbers["5"] = set([segments["a"], 
segments["b"], segments["d"], segments["f"], segments["g"]]) numbers["3"] = set([segments["a"], segments["c"], segments["d"], segments["f"], segments["g"]]) value = 0 for digit_set in right: value *= 10 digit = int([k for k, v in numbers.items() if v == digit_set][0]) value += digit total_sum += value print("Part 2: ", total_sum)
# Python lists: build a small list of fruit names and display it.
mylist = ["banana", "abacate", "manga"]
print(mylist)
# Final project: monitor computer usage.  Given a stream of login/logout
# events, report which users are still logged in to which machines.


def get_event_date(event):
    """Sort key: an event's date (the original defined this three times)."""
    return event.date


def current_users(events):
    """Return a dict mapping machine name -> set of currently logged-in users.

    Events are processed in chronological order (sorted by date string).
    """
    events.sort(key=get_event_date)
    machines = {}
    for event in events:
        # Make sure the machine has an entry before touching its user set.
        if event.machine not in machines:
            machines[event.machine] = set()
        # BUG FIX: the original tested "logout" in BOTH branches, so logins
        # were never recorded and logouts were added instead of removed.
        if event.type == "login":
            machines[event.machine].add(event.user)
        elif event.type == "logout":
            # discard() (unlike remove()) tolerates a logout without a
            # matching earlier login, which occurs in the sample data.
            machines[event.machine].discard(event.user)
    return machines


def generate_report(machines):
    """Print only machines that still have logged-in users."""
    for machine, users in machines.items():
        if len(users) > 0:
            users_list = ", ".join(users)
            print("{} : {}".format(machine, users_list))


class Event:
    """A single login/logout event on one machine."""

    def __init__(self, event_date, event_type, machine_name, user):
        self.date = event_date
        self.type = event_type
        self.machine = machine_name
        self.user = user


events = [
    Event("2020-05-12 12:50PM", "login", "mail-server local", "owen"),
    Event("2021-04-12 4:50PM", "logout", "mail-server local", "james"),
    Event("2020-05-14 2:50PM", "login", "workstation local", "shem"),
    Event("2020-05-1 16:50PM", "login", "mail-server local", "Timz"),
    Event("2020-06-19 18:50PM", "logout", "admin server local", "brian"),
    Event("2020-02-12 17:50PM", "login", "mail-server local", "chris"),
]

users = current_users(events)
print(users)

# generate user report
generate_report(users)
class RockartExamplesException(Exception):
    """Base class for all rockart-examples errors."""


class RockartExamplesIndexError(RockartExamplesException, IndexError):
    """Package-specific error that is also catchable as IndexError."""


class RockartExamplesValueError(RockartExamplesException, ValueError):
    """Package-specific error that is also catchable as ValueError."""
print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n") row = int(input("Enter the number of rows: ")) for i in range(1, row+1): for j in range(i): print("*", end=" ") print() for i in range(row+1, 0, -1): for j in range(i): print("*", end=" ") print()
#!/usr/bin/env python3 for hour_offset in range(0, 24, 6): train = open('data/train_b{:02}.csv'.format(hour_offset), 'w', newline='') test = open('data/test_b{:02}.csv'.format(hour_offset), 'w', newline='') data = open('data/data.txt') t = int(next(data)) n, m = tuple(map(int, next(data).split())) for line_num, line in enumerate(data): hse = line_num // n # hours since epoch hod = hse % 24 # hour of day dse = (hse + hour_offset) // 24 # days since epoch dow = dse % 7 # day of week row = line_num % n for col, dem in enumerate(map(int, line.split())): out, lim = (test, -1) if dem == -1 else (train, None) out.write(','.join(map(str, (hse, row, col, hod, dow, dem)[:lim])) + '\n') data.close() train.close() test.close()
# Deleting, removing, popping and clearing list elements.
p = [1, 2, 3, 4, 5, 6, 7, 8, 9]

del p[1:3]      # drop the elements at indices 1-2
print(p)

p.remove(8)     # remove by value
print(p)

print(p.pop())  # pop (and show) the last element

p.clear()
print(p)

l = [1, 3, 4, 5, 6, 7]
l.remove(3)
print(l)

l.sort()
print(l)

l.reverse()
print(l)

l.clear()
print(l)
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylark rules for Swift."""

load(
    "@bazel_skylib//lib:collections.bzl",
    "collections",
)
load(
    "@build_bazel_rules_apple//apple/bundling:apple_bundling_aspect.bzl",
    "apple_bundling_aspect",
)
load(
    "@build_bazel_rules_apple//apple:providers.bzl",
    "AppleResourceInfo",
    "AppleResourceSet",
    "SwiftInfo",
)
load(
    "@build_bazel_rules_apple//apple:utils.bzl",
    "xcrun_action",
    "XCRUNWRAPPER_LABEL",
    "module_cache_path",
    "label_scoped_path",
)
load(
    "@build_bazel_rules_apple//apple/bundling:xcode_support.bzl",
    "xcode_support",
)
load(
    "@build_bazel_rules_apple//common:attrs.bzl",
    "attrs",
)
load(
    "@build_bazel_rules_apple//common:providers.bzl",
    "providers",
)

def _parent_dirs(dirs):
  """Returns a set of parent directories for each directory in dirs."""
  return depset(direct=[f.rpartition("/")[0] for f in dirs])

def _framework_names(dirs):
  """Returns the framework name for each directory in dir."""
  return depset(direct=[f.rpartition("/")[2].partition(".")[0] for f in dirs])

def _swift_target(cpu, platform, sdk_version):
  """Returns a target triplet for Swift compiler."""
  platform_string = str(platform.platform_type)
  if platform_string not in ["ios", "watchos", "tvos", "macos"]:
    fail("Platform '%s' is not supported" % platform_string)
  # The Swift triple spells macOS "macosx".
  if platform_string == "macos":
    platform_string = "macosx"

  return "%s-apple-%s%s" % (cpu, platform_string, sdk_version)

def _swift_compilation_mode_flags(config_vars, objc_fragment):
  """Returns additional `swiftc` flags for the current compilation mode.

  Args:
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
        that affect compilation of this target.
    objc_fragment: The Objective-C configuration fragment.

  Returns:
    The additional command line flags to pass to `swiftc`.
  """
  mode = config_vars["COMPILATION_MODE"]

  flags = []
  if mode == "dbg" or mode == "fastbuild":
    # TODO(dmishe): Find a way to test -serialize-debugging-options
    flags += [
        "-Onone", "-DDEBUG", "-enable-testing", "-Xfrontend",
        "-serialize-debugging-options"
    ]
  elif mode == "opt":
    flags += ["-O", "-DNDEBUG"]

  if mode == "dbg" or objc_fragment.generate_dsym:
    flags.append("-g")

  return flags

def _clang_compilation_mode_flags(objc_fragment):
  """Returns additional clang flags for the current compilation mode."""

  # In general, every compilation mode flag from native objc_ rules should be
  # passed, but -g seems to break Clang module compilation. Since this flag
  # does not make much sense for module compilation and only touches headers,
  # it's ok to omit.
  native_clang_flags = objc_fragment.copts_for_current_compilation_mode

  return [x for x in native_clang_flags if x != "-g"]

def _swift_bitcode_flags(apple_fragment):
  """Returns bitcode flags based on selected mode."""
  mode = str(apple_fragment.bitcode_mode)
  if mode == "embedded":
    return ["-embed-bitcode"]
  elif mode == "embedded_markers":
    return ["-embed-bitcode-marker"]

  return []

def _swift_sanitizer_flags(features):
  """Returns sanitizer flags."""
  sanitizer_features_to_flags = {
      "asan": ["-sanitize=address"],
  }
  sanitizer_flags = []
  for (feature, flags) in sanitizer_features_to_flags.items():
    if feature in features:
      sanitizer_flags.extend(flags)
  return sanitizer_flags

def swift_module_name(label):
  """Returns a module name for the given label."""
  prefix = label.package.lstrip("//").replace("/", "_").replace("-", "_")
  suffix = label.name.replace("-", "_")
  if prefix:
    return (prefix + "_" + suffix)
  else:
    return suffix

def _swift_lib_dir(apple_fragment, config_vars, is_static=False):
  """Returns the location of Swift runtime directory to link against.

  Args:
    apple_fragment: The Apple configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
        that affect compilation of this target.
    is_static: If True, the static library directory will be used instead of
        the dynamic library directory (currently available only on macOS).

  Returns:
    The location of the Swift runtime directory to link against.
  """
  dir_name = "swift_static" if is_static else "swift"
  platform_str = apple_fragment.single_arch_platform.name_in_plist.lower()

  if "xcode_toolchain_path" in config_vars:
    return "{0}/usr/lib/{1}/{2}".format(
        config_vars["xcode_toolchain_path"], dir_name, platform_str)

  return "{0}/Toolchains/XcodeDefault.xctoolchain/usr/lib/{1}/{2}".format(
      apple_common.apple_toolchain().developer_dir(), dir_name, platform_str)

def swift_linkopts(apple_fragment, config_vars, is_static=False):
  """Returns additional linker arguments needed to link Swift.

  Args:
    apple_fragment: The Apple configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
        that affect compilation of this target.
    is_static: If True, the static library directory will be used instead of
        the dynamic library directory (currently available only on macOS).

  Returns:
    Additional linker arguments needed to link Swift.
  """
  return ["-L" + _swift_lib_dir(apple_fragment, config_vars, is_static)]

def _swift_parsing_flags(srcs):
  """Returns additional parsing flags for swiftc."""
  # swiftc has two different parsing modes: script and library.
  # The difference is that in script mode top-level expressions are allowed.
  # This mode is triggered when the file compiled is called main.swift.
  # Additionally, script mode is used when there's just one file in the
  # compilation. we would like to avoid that and therefore force library mode
  # when there's only one source and it's not called main.
  if len(srcs) == 1 and srcs[0].basename != "main.swift":
    return ["-parse-as-library"]
  return []

def _is_valid_swift_module_name(string):
  """Returns True if the string is a valid Swift module name."""
  if not string:
    return False

  for char in string:
    # Check that the character is in [a-zA-Z0-9_]
    if not (char.isalnum() or char == "_"):
      return False

  return True

def _validate_rule_and_deps(ctx):
  """Validates the target and its dependencies."""
  name_error_str = ("Error in target '%s', Swift target and its dependencies' "+
                    "names can only contain characters in [a-zA-Z0-9_].")

  # Validate the name of the target
  if not _is_valid_swift_module_name(ctx.label.name):
    fail(name_error_str % ctx.label)

  # Validate names of the dependencies
  for dep in ctx.attr.deps:
    if not _is_valid_swift_module_name(dep.label.name):
      fail(name_error_str % dep.label)

def _get_wmo_state(copts, swift_fragment):
  """Returns the status of Whole Module Optimization feature.

  Args:
    copts: The list of copts to search for WMO flags.
    swift_fragment: The Swift configuration fragment.

  Returns:
    A Boolean value indicating whether WMO has been enabled.
  """
  all_copts = copts + swift_fragment.copts()
  return "-wmo" in all_copts or "-whole-module-optimization" in all_copts

def swift_compile_requirements(
    srcs,
    deps,
    module_name,
    label,
    copts,
    defines,
    apple_fragment,
    objc_fragment,
    swift_fragment,
    config_vars,
    default_configuration,
    xcode_config,
    genfiles_dir,
    features):
  """Returns a struct that contains the requirements to compile Swift code.

  Args:
    srcs: The list of `*.swift` sources to compile.
    deps: The list of targets that are dependencies for the sources being
        compiled.
    module_name: The name of the Swift module to which the compiled files
        belong.
    label: The label used to generate the Swift module name if one was not
        provided.
    copts: A list of compiler options to pass to `swiftc`. Defaults to an
        empty list.
    defines: A list of compiler defines to pass to `swiftc`. Defaults to an
        empty list.
    apple_fragment: The Apple configuration fragment.
    objc_fragment: The Objective-C configuration fragment.
    swift_fragment: The Swift configuration fragment.
    config_vars: The dictionary of configuration variables (i.e., `ctx.var`)
        that affect compilation of this target.
    default_configuration: The default configuration retrieved from the rule
        context.
    xcode_config: the XcodeVersionConfig to use
    genfiles_dir: The directory where genfiles are written.
    features: List of enabled features as passed with --features or set into
        the features attribute.

  Returns:
    A structure that contains the information required to compile Swift code.
  """
  return struct(
      srcs=srcs,
      deps=deps,
      module_name=module_name,
      label=label,
      copts=copts,
      defines=defines,
      apple_fragment=apple_fragment,
      objc_fragment=objc_fragment,
      swift_fragment=swift_fragment,
      config_vars=config_vars,
      default_configuration=default_configuration,
      xcode_config=xcode_config,
      genfiles_dir=genfiles_dir,
      features=features,
  )

def swiftc_inputs(ctx):
  """Determine the list of inputs required for the compile action.

  Args:
    ctx: rule context.

  Returns:
    A list of files needed by swiftc.
  """
  # TODO(allevato): Simultaneously migrate callers off this function and swap
  # it out with swiftc_inputs.
  return _swiftc_inputs(ctx.files.srcs, ctx.attr.deps)

def _swiftc_inputs(srcs, deps=[]):
  """Determines the list of inputs required for a compile action.

  Args:
    srcs: A list of `*.swift` source files being compiled.
    deps: A list of targets that are dependencies of the files being compiled.

  Returns:
    A list of files that should be passed as inputs to the Swift compilation
    action.
  """
  swift_providers = providers.find_all(deps, SwiftInfo)
  dep_modules = depset(transitive=[
      swift.transitive_modules for swift in swift_providers
  ])

  transitive_objc = apple_common.new_objc_provider(
      providers=providers.find_all(deps, "objc"))
  objc_files = depset(transitive=[
      transitive_objc.header,
      transitive_objc.module_map,
      transitive_objc.umbrella_header,
      transitive_objc.static_framework_file,
      transitive_objc.dynamic_framework_file,
  ])

  return srcs + dep_modules.to_list() + objc_files.to_list()

def swiftc_args(ctx):
  """Returns an almost complete array of arguments to be passed to swiftc.

  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file. It has no
  side effects and does not modify ctx. It expects ctx to contain the same
  fragments and attributes as swift_library (you're encouraged to depend on
  SWIFT_LIBRARY_ATTRS in your rule definition).

  Args:
    ctx: rule context

  Returns:
    A list of command line arguments for swiftc. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  # TODO(allevato): Simultaneously migrate callers off this function and swap
  # it out with swiftc_args.
  reqs = swift_compile_requirements(
      ctx.files.srcs,
      ctx.attr.deps,
      ctx.attr.module_name,
      ctx.label,
      ctx.attr.copts,
      ctx.attr.defines,
      ctx.fragments.apple,
      ctx.fragments.objc,
      ctx.fragments.swift,
      ctx.var,
      ctx.configuration,
      ctx.attr._xcode_config[apple_common.XcodeVersionConfig],
      ctx.genfiles_dir,
      ctx.features)
  return _swiftc_args(reqs)

def _swiftc_args(reqs):
  """Returns an almost complete array of arguments to be passed to swiftc.

  This macro is intended to be used by the swift_library rule implementation
  below but it also may be used by other rules outside this file.

  Args:
    reqs: The compilation requirements as returned by
        `swift_compile_requirements`.

  Returns:
    A list of command line arguments for `swiftc`. The returned arguments
    include everything except the arguments generation of which would require
    adding new files or actions.
  """
  apple_fragment = reqs.apple_fragment

  deps = reqs.deps

  cpu = apple_fragment.single_arch_cpu
  platform = apple_fragment.single_arch_platform

  target_os = reqs.xcode_config.minimum_os_for_platform_type(
      platform.platform_type)
  target = _swift_target(cpu, platform, target_os)
  apple_toolchain = apple_common.apple_toolchain()

  # Collect transitive dependecies.
  # NOTE(review): the `+=` operators on depsets below are the deprecated
  # legacy depset union — presumably this file targets an older Bazel;
  # confirm before migrating to `depset(transitive=...)`.
  dep_modules = depset()
  swiftc_defines = depset(reqs.defines)

  swift_providers = providers.find_all(deps, SwiftInfo)
  for swift in swift_providers:
    dep_modules += swift.transitive_modules
    swiftc_defines += swift.transitive_defines

  objc_providers = providers.find_all(deps, "objc")
  transitive_objc = apple_common.new_objc_provider(providers=objc_providers)

  # Everything that needs to be included with -I. These need to be pulled from
  # the list of providers because there are currently issues with some required
  # header search paths for ObjC protos not being available to ClangImporter.
  objc_includes = []
  for objc in objc_providers:
    objc_includes += objc.include.to_list()
  objc_includes = depset(objc_includes)

  # Module maps for dependent targets. These should be pulled from the combined
  # provider to ensure that we only get direct deps.
  objc_module_maps = transitive_objc.module_map

  static_frameworks = _framework_names(transitive_objc.framework_dir)

  # A list of paths to pass with -F flag.
  framework_dirs = depset(
      direct=[apple_toolchain.platform_developer_framework_dir(apple_fragment)],
      transitive=[
          _parent_dirs(transitive_objc.framework_dir),
          _parent_dirs(transitive_objc.dynamic_framework_dir),
      ])

  # objc_library#copts is not propagated to its dependencies and so it is not
  # collected here. In theory this may lead to un-importable targets (since
  # their module cannot be compiled by clang), but did not occur in practice.
  objc_defines = transitive_objc.define

  srcs_args = [f.path for f in reqs.srcs]

  # Include each swift module's parent directory for imports to work.
  include_dirs = depset([x.dirname for x in dep_modules])

  # Include the genfiles root so full-path imports can work for generated
  # protos.
  include_dirs += depset([reqs.genfiles_dir.path])

  include_args = ["-I%s" % d for d in include_dirs + objc_includes]
  framework_args = ["-F%s" % x for x in framework_dirs]
  define_args = ["-D%s" % x for x in swiftc_defines.to_list()]

  # Disable the LC_LINKER_OPTION load commands for static frameworks automatic
  # linking. This is needed to correctly deduplicate static frameworks from
  # also being linked into test binaries where it is also linked into the app
  # binary.
  autolink_args = collections.before_each(
      "-Xfrontend",
      collections.before_each("-disable-autolink-framework",
                              static_frameworks))

  clang_args = collections.before_each(
      "-Xcc",

      # Add the current directory to clang's search path.
      # This instance of clang is spawned by swiftc to compile module maps and
      # is not passed the current directory as a search path by default.
      ["-iquote", "."]

      # Pass DEFINE or copt values from objc configuration and rules to clang
      + ["-D" + x for x in objc_defines] + reqs.objc_fragment.copts
      + _clang_compilation_mode_flags(reqs.objc_fragment)

      # Load module maps explicitly instead of letting Clang discover them on
      # search paths. This is needed to avoid a case where Clang may load the
      # same header both in modular and non-modular contexts, leading to
      # duplicate definitions in the same file.
      # https://llvm.org/bugs/show_bug.cgi?id=19501
      + ["-fmodule-map-file=%s" % x.path for x in objc_module_maps])

  args = [
      "-emit-object",
      "-module-name",
      reqs.module_name,
      "-target",
      target,
      "-sdk",
      apple_toolchain.sdk_dir(),
      "-module-cache-path",
      module_cache_path(reqs.genfiles_dir),
  ]

  if reqs.default_configuration.coverage_enabled:
    args.extend(["-profile-generate", "-profile-coverage-mapping"])

  args.extend(_swift_compilation_mode_flags(
      reqs.config_vars, reqs.objc_fragment))
  args.extend(_swift_bitcode_flags(apple_fragment))
  args.extend(_swift_parsing_flags(reqs.srcs))
  args.extend(_swift_sanitizer_flags(reqs.features))
  args.extend(srcs_args)
  args.extend(include_args)
  args.extend(framework_args)
  args.extend(clang_args)
  args.extend(define_args)
  args.extend(autolink_args)

  # Add user flags in the very end, this will let the compiler better sanitize
  # unterminated flag pairs (e.g. -Xcc) and not clash with generated flags.
  args.extend(reqs.swift_fragment.copts())
  args.extend(reqs.copts)

  return args

def _find_swift_version(args):
  """Returns the value of the `-swift-version` argument, if found.

  Args:
    args: The command-line arguments to be scanned.

  Returns:
    The value of the `-swift-version` argument, or None if it was not found in
    the argument list.
  """
  # Note that the argument can occur multiple times, and the last one wins.
  last_swift_version = None

  count = len(args)
  for i in range(count):
    arg = args[i]
    if arg == "-swift-version" and i + 1 < count:
      last_swift_version = args[i + 1]

  return last_swift_version

def register_swift_compile_actions(ctx, reqs):
  """Registers actions to compile Swift sources.

  Args:
    ctx: The rule context. Within this function, it should only be used to
        register actions, or declare files; do not use it to access attributes
        because it may be called from many different rules.
    reqs: The compilation requirements as returned by
        `swift_compile_requirements`.

  Returns:
    A tuple containing the (1) output files of the compilation action, the (2)
    `objc` provider, and (3) the `SwiftInfo` provider that should be
    propagated by a target compiling these Swift sources.
  """
  module_name = reqs.module_name
  label = reqs.label

  # Collect transitive dependencies.
  dep_modules = depset()
  dep_libs = depset()
  dep_docs = depset()
  swiftc_defines = depset(reqs.defines)

  swift_providers = providers.find_all(reqs.deps, SwiftInfo)
  for swift in swift_providers:
    dep_libs += swift.transitive_libs
    dep_modules += swift.transitive_modules
    dep_docs += swift.transitive_docs
    swiftc_defines += swift.transitive_defines

  # A unique path for rule's outputs.
  objs_outputs_path = label_scoped_path(reqs.label, "_objs/")

  output_lib = ctx.new_file(objs_outputs_path + module_name + ".a")
  output_module = ctx.new_file(objs_outputs_path + module_name +
                               ".swiftmodule")
  output_doc = ctx.new_file(objs_outputs_path + module_name + ".swiftdoc")

  # These filenames are guaranteed to be unique, no need to scope.
  output_header = ctx.new_file(label.name + "-Swift.h")
  swiftc_output_map_file = ctx.new_file(label.name + ".output_file_map.json")

  swiftc_output_map = struct()  # Maps output types to paths.
  output_objs = []  # Object file outputs, used in archive action.
  swiftc_outputs = []  # Other swiftc outputs that aren't processed further.

  has_wmo = _get_wmo_state(reqs.copts, reqs.swift_fragment)

  for source in reqs.srcs:
    basename = source.basename
    output_map_entry = {}

    # Output an object file
    obj = ctx.new_file(objs_outputs_path + basename + ".o")
    output_objs.append(obj)
    output_map_entry["object"] = obj.path

    # Output a partial module file, unless WMO is enabled in which case only
    # the final, complete module will be generated.
    if not has_wmo:
      partial_module = ctx.new_file(objs_outputs_path + basename +
                                    ".partial_swiftmodule")
      swiftc_outputs.append(partial_module)
      output_map_entry["swiftmodule"] = partial_module.path

    swiftc_output_map += struct(**{source.path: struct(**output_map_entry)})

  # Write down the intermediate outputs map for this compilation, to be used
  # with -output-file-map flag.
  # It's a JSON file that maps each source input (.swift) to its outputs
  # (.o, .bc, .d, ...)
  # Example:
  #   {'foo.swift':
  #       {'object': 'foo.o', 'bitcode': 'foo.bc', 'dependencies': 'foo.d'}}
  # There's currently no documentation on this option, however all of the
  # keys are listed here
  # https://github.com/apple/swift/blob/swift-2.2.1-RELEASE/include/swift/Driver/Types.def
  ctx.file_action(
      output=swiftc_output_map_file, content=swiftc_output_map.to_json())

  args = ["swiftc"] + _swiftc_args(reqs)

  swift_version = _find_swift_version(args)

  args += [
      "-I" + output_module.dirname,
      "-emit-module-path",
      output_module.path,
      "-emit-objc-header-path",
      output_header.path,
      "-output-file-map",
      swiftc_output_map_file.path,
  ]

  if has_wmo:
    # WMO has two modes: threaded and not. We want the threaded mode because
    # it will use the output map we generate. This leads to a better debug
    # experience in lldb and Xcode.
    # TODO(b/32571265): 12 has been chosen as the best option for a Mac Pro,
    # we should get an interface in Bazel to get core count.
args.extend(["-num-threads", "12"]) xcrun_action( ctx, inputs=_swiftc_inputs(reqs.srcs, reqs.deps) + [swiftc_output_map_file], outputs=([output_module, output_header, output_doc] + output_objs + swiftc_outputs), mnemonic="SwiftCompile", arguments=args, use_default_shell_env=False, progress_message=("Compiling Swift module %s (%d files)" % (reqs.label.name, len(reqs.srcs)))) xcrun_action(ctx, inputs=output_objs, outputs=(output_lib,), mnemonic="SwiftArchive", arguments=[ "libtool", "-static", "-o", output_lib.path ] + [x.path for x in output_objs], progress_message=( "Archiving Swift objects %s" % reqs.label.name)) # This tells the linker to write a reference to .swiftmodule as an AST symbol # in the final binary. # With dSYM enabled, this results in a __DWARF,__swift_ast section added to # the dSYM binary, from where LLDB is able deserialize module information. # Without dSYM, LLDB will follow the AST references, however there is a bug # where it follows only the first one https://bugs.swift.org/browse/SR-2637 # This means that dSYM is required for debugging until that is resolved. extra_linker_args = ["-Xlinker -add_ast_path -Xlinker " + output_module.path] # The full transitive set of libraries and modules used by this target. transitive_libs = depset([output_lib]) + dep_libs transitive_modules = depset([output_module]) + dep_modules transitive_docs = depset([output_doc]) + dep_docs compile_outputs = [output_lib, output_module, output_header, output_doc] objc_providers = providers.find_all(reqs.deps, "objc") objc_provider_args = { "library": depset([output_lib]) + dep_libs, "header": depset([output_header]), "providers": objc_providers, "link_inputs": depset([output_module]), "uses_swift": True, } # Re-propagate direct Objective-C module maps to dependents, because those # Swift modules still need to see them. We need to construct a new transitive # objc provider to get the correct strict propagation behavior. 
transitive_objc = apple_common.new_objc_provider(providers=objc_providers) objc_provider_args["module_map"] = transitive_objc.module_map # TODO(b/63674406): For macOS, don't propagate the runtime linker path flags, # because we need to be able to be able to choose the static version of the # library instead. Clean this up once the native bundling rules are deleted. platform_type = ctx.fragments.apple.single_arch_platform.platform_type if platform_type != apple_common.platform_type.macos: objc_provider_args["linkopt"] = depset( swift_linkopts(reqs.apple_fragment, reqs.config_vars) + extra_linker_args, order="topological") objc_provider = apple_common.new_objc_provider(**objc_provider_args) return compile_outputs, objc_provider, SwiftInfo( direct_lib=output_lib, direct_module=output_module, direct_doc=output_doc, swift_version=swift_version, transitive_libs=transitive_libs, transitive_modules=transitive_modules, transitive_defines=swiftc_defines, transitive_docs=transitive_docs, ) def merge_swift_info_providers(targets): """Merges the transitive Swift info of the given targets into a new provider. This function should be used when it is necessary to merge SwiftInfo providers outside of a compile action (which does it automatically). Args: targets: A sequence of targets that may propagate SwiftInfo providers. Those that do not are ignored. Returns: A new SwiftInfo provider that contains the transitive information from all the targets. 
""" transitive_defines = depset() transitive_libs = depset() transitive_modules = depset() transitive_docs = depset() for swift_info in providers.find_all(targets, SwiftInfo): transitive_defines += swift_info.transitive_defines transitive_libs += swift_info.transitive_libs transitive_modules += swift_info.transitive_modules transitive_docs += swift_info.transitive_docs return SwiftInfo( direct_lib=None, direct_module=None, direct_doc=None, swift_version=None, transitive_defines=transitive_defines, transitive_libs=transitive_libs, transitive_modules=transitive_modules, transitive_docs=transitive_docs, ) def merge_swift_objc_providers(targets): """Merges the transitive objc info of the given targets into a new provider. This is restricted to the keys of the objc provider that are used by Swift compile actions to propagate information about Swift compiled libraries back up to linker actions and so forth. This function should be used when it is necessary to merge objc providers created by other Swift libraries outside of a compile action (which does it automatically). Args: targets: A sequence of targets that may propagate objc providers. Those that do not are ignored. Returns: A new objc provider that contains the transitive information from all the targets. """ libraries = depset() headers = depset() link_inputs = depset() linkopts = depset() for objc in providers.find_all(targets, "objc"): libraries += objc.library headers += objc.header link_inputs += objc.link_inputs linkopts += objc.linkopt objc_provider_args = {"uses_swift": True} if headers: objc_provider_args["header"] = headers if libraries: objc_provider_args["library"] = libraries if linkopts: objc_provider_args["linkopt"] = linkopts if link_inputs: objc_provider_args["link_inputs"] = link_inputs return apple_common.new_objc_provider(**objc_provider_args) def _collect_resource_sets(resources, structured_resources, deps, module_name): """Collects resource sets from the target and its dependencies. 
Args: resources: The resources associated with the target being built. structured_resources: The structured resources associated with the target being built. deps: The dependencies of the target being built. module_name: The name of the Swift module associated with the resources (either the user-provided name, or the auto-generated one). Returns: A list of structs representing the transitive resources to propagate to the bundling rules. """ resource_sets = [] # Create a resource set from the resources attached directly to this target. if resources or structured_resources: resource_sets.append(AppleResourceSet( resources=depset(resources), structured_resources=depset(structured_resources), swift_module=module_name, )) # Collect transitive resource sets from dependencies. for dep in deps: if AppleResourceInfo in dep: resource_sets.extend(dep[AppleResourceInfo].resource_sets) return resource_sets def _swift_library_impl(ctx): """Implementation for swift_library Skylark rule.""" _validate_rule_and_deps(ctx) resolved_module_name = ctx.attr.module_name or swift_module_name(ctx.label) reqs = swift_compile_requirements( ctx.files.srcs, ctx.attr.deps, resolved_module_name, ctx.label, ctx.attr.copts, ctx.attr.defines, ctx.fragments.apple, ctx.fragments.objc, ctx.fragments.swift, ctx.var, ctx.configuration, ctx.attr._xcode_config[apple_common.XcodeVersionConfig], ctx.genfiles_dir, ctx.features) compile_outputs, objc_provider, swift_info = register_swift_compile_actions( ctx, reqs) resource_sets = _collect_resource_sets( ctx.files.resources, ctx.files.structured_resources, ctx.attr.deps, resolved_module_name) return struct( files=depset(compile_outputs), swift=struct( direct_lib=swift_info.direct_lib, direct_module=swift_info.direct_module, direct_doc=swift_info.direct_doc, swift_version=swift_info.swift_version, transitive_libs=swift_info.transitive_libs, transitive_modules=swift_info.transitive_modules, transitive_docs=swift_info.transitive_docs, 
transitive_defines=swift_info.transitive_defines, ), objc=objc_provider, providers=[ AppleResourceInfo(resource_sets=resource_sets), swift_info, ]) SWIFT_LIBRARY_ATTRS = { "srcs": attr.label_list(allow_files = [".swift"], allow_empty=False), "deps": attr.label_list( # TODO(b/37902442): Figure out why this is required here; it seems like # having it on the binary should be sufficient because the aspect goes # down all deps, but without this, the aspect runs *after* this rule # gets to examine its deps (so the AppleResource provider isn't there # yet). aspects=[apple_bundling_aspect], providers=[["swift"], [SwiftInfo], ["objc"]] ), "module_name": attr.string(mandatory=False), "defines": attr.string_list(mandatory=False, allow_empty=True), "copts": attr.string_list(mandatory=False, allow_empty=True), "resources": attr.label_list( mandatory=False, allow_empty=True, allow_files=True), "structured_resources": attr.label_list( mandatory=False, allow_empty=True, allow_files=True), "_xcode_config": attr.label( default=configuration_field( fragment="apple", name="xcode_config_label")), "_xcrunwrapper": attr.label( executable=True, cfg="host", default=Label(XCRUNWRAPPER_LABEL)) } swift_library = rule( _swift_library_impl, attrs = SWIFT_LIBRARY_ATTRS, fragments = ["apple", "objc", "swift"], output_to_genfiles=True, ) """ Builds a Swift module. A module is a pair of static library (.a) + module header (.swiftmodule). Dependant targets can import this module as "import RuleName". Args: srcs: Swift sources that comprise this module. deps: Other Swift modules. module_name: Optional. Sets the Swift module name for this target. By default the module name is the target path with all special symbols replaced by "_", e.g. //foo:bar can be imported as "foo_bar". copts: A list of flags passed to swiftc command line. defines: Each VALUE in this attribute is passed as -DVALUE to the compiler for this and dependent targets. """
# -*- coding: utf-8 -*-


def position_of_minimum(values):
    """Return the 1-based position of the first minimum of *values*.

    Ties keep the earliest occurrence (strict `<` comparison, as in the
    original loop).
    """
    menor = values[0]
    posicao = 1
    for indice, valor in enumerate(values):
        if valor < menor:
            menor = valor
            posicao = indice + 1
    return posicao


def main():
    # First input line: how many attempts to consider.
    # Second input line: the whitespace-separated attempts themselves.
    n = int(input())
    # BUG FIX: the original kept the tokens as *strings* and compared them
    # lexicographically (e.g. "10" < "9" is False), producing wrong positions
    # for numeric input.  Convert each token to int before comparing.
    tentativas = [int(token) for token in input().split()]
    # The original iterated range(n) over the parsed tokens, i.e. only the
    # first n values are considered.
    print("{}".format(position_of_minimum(tentativas[:n])))


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*- CSRF_ENABLED = True SECRET_KEY = "208h3oiushefo9823liukhso8dyfhsdklihf" debug = False
def getLate():
    """Build and return a fresh Late instance."""
    return Late()


class Late():
    # Class-level attribute shared by all instances.
    value = 'late'
# Demonstrate str.format with a reusable four-slot template.
formatter = "{} {} {} {}"

# Each tuple below is one set of arguments for the template; looping over
# them produces exactly the same five lines as the original one-call-per-line
# version.
for arguments in (
    (1, 2, 3, 4),
    ("one", "two", "three", "four"),
    (True, False, False, True),
    (formatter, formatter, formatter, formatter),
    (
        "I had this thing.",
        "That you could type up right.",
        "But it didn't sing.",
        "So I said goodnight.",
    ),
):
    print(formatter.format(*arguments))
# Copyright 2014 PDFium authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Original code from V8, original license was:
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This file is used only by the standalone PDFium build. Under a chromium
# checkout, the src/testing/gtest.gyp file is used instead.
{
  'targets': [
    {
      'target_name': 'gtest',
      'toolsets': ['host', 'target'],
      'type': 'static_library',
      'sources': [
        'gtest/include/gtest/gtest-death-test.h',
        'gtest/include/gtest/gtest-message.h',
        'gtest/include/gtest/gtest-param-test.h',
        'gtest/include/gtest/gtest-printers.h',
        'gtest/include/gtest/gtest-spi.h',
        'gtest/include/gtest/gtest-test-part.h',
        'gtest/include/gtest/gtest-typed-test.h',
        'gtest/include/gtest/gtest.h',
        'gtest/include/gtest/gtest_pred_impl.h',
        'gtest/include/gtest/internal/gtest-death-test-internal.h',
        'gtest/include/gtest/internal/gtest-filepath.h',
        'gtest/include/gtest/internal/gtest-internal.h',
        'gtest/include/gtest/internal/gtest-linked_ptr.h',
        'gtest/include/gtest/internal/gtest-param-util-generated.h',
        'gtest/include/gtest/internal/gtest-param-util.h',
        'gtest/include/gtest/internal/gtest-port.h',
        'gtest/include/gtest/internal/gtest-string.h',
        'gtest/include/gtest/internal/gtest-tuple.h',
        'gtest/include/gtest/internal/gtest-type-util.h',
        'gtest/src/gtest-all.cc',
        'gtest/src/gtest-death-test.cc',
        'gtest/src/gtest-filepath.cc',
        'gtest/src/gtest-internal-inl.h',
        'gtest/src/gtest-port.cc',
        'gtest/src/gtest-printers.cc',
        'gtest/src/gtest-test-part.cc',
        'gtest/src/gtest-typed-test.cc',
        'gtest/src/gtest.cc',
        'gtest-support.h',
      ],
      'sources!': [
        'gtest/src/gtest-all.cc',  # Not needed by our build.
      ],
      'include_dirs': [
        'gtest',
        'gtest/include',
      ],
      'dependencies': [
        'gtest_prod',
      ],
      'defines': [
        # In order to allow regex matches in gtest to be shared between Windows
        # and other systems, we tell gtest to always use it's internal engine.
        'GTEST_HAS_POSIX_RE=0',
        # Unit tests don't require C++11, yet.
        'GTEST_LANG_CXX11=0',
      ],
      'all_dependent_settings': {
        'defines': [
          'GTEST_HAS_POSIX_RE=0',
          'GTEST_LANG_CXX11=0',
        ],
      },
      'conditions': [
        ['os_posix == 1', {
          'defines': [
            # gtest isn't able to figure out when RTTI is disabled for gcc
            # versions older than 4.3.2, and assumes it's enabled.  Our Mac
            # and Linux builds disable RTTI, and cannot guarantee that the
            # compiler will be 4.3.2. or newer.  The Mac, for example, uses
            # 4.2.1 as that is the latest available on that platform.  gtest
            # must be instructed that RTTI is disabled here, and for any
            # direct dependents that might include gtest headers.
            'GTEST_HAS_RTTI=0',
          ],
          'direct_dependent_settings': {
            'defines': [
              'GTEST_HAS_RTTI=0',
            ],
          },
        }],
        ['OS=="android"', {
          'defines': [
            'GTEST_HAS_CLONE=0',
          ],
          'direct_dependent_settings': {
            'defines': [
              'GTEST_HAS_CLONE=0',
            ],
          },
        }],
        ['OS=="android"', {
          # We want gtest features that use tr1::tuple, but we currently
          # don't support the variadic templates used by libstdc++'s
          # implementation. gtest supports this scenario by providing its
          # own implementation but we must opt in to it.
          'defines': [
            'GTEST_USE_OWN_TR1_TUPLE=1',
            # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
            # gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
            # automatically on android, so it has to be set explicitly here.
            'GTEST_HAS_TR1_TUPLE=1',
          ],
          'direct_dependent_settings': {
            'defines': [
              'GTEST_USE_OWN_TR1_TUPLE=1',
              'GTEST_HAS_TR1_TUPLE=1',
            ],
          },
        }],
      ],
      'direct_dependent_settings': {
        'defines': [
          'UNIT_TEST',
        ],
        'include_dirs': [
          'gtest/include',  # So that gtest headers can find themselves.
        ],
        'target_conditions': [
          ['_type=="executable"', {
            'test': 1,
            'conditions': [
              ['OS=="mac"', {
                'run_as': {
                  # BUG FIX: the key here read 'action????' (mojibake/garbled
                  # text); the GYP 'run_as' setting requires the key 'action',
                  # matching the upstream V8/gtest gyp file.
                  'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
                },
              }],
              ['OS=="win"', {
                'run_as': {
                  # BUG FIX: same 'action????' -> 'action' repair as above.
                  'action': ['$(TargetPath)', '--gtest_print_time'],
                },
              }],
            ],
          }],
        ],
        'msvs_disabled_warnings': [4800],
      },
    },
    {
      'target_name': 'gtest_main',
      'type': 'static_library',
      'dependencies': [
        'gtest',
      ],
      'sources': [
        'gtest/src/gtest_main.cc',
      ],
    },
    {
      'target_name': 'gtest_prod',
      'toolsets': ['host', 'target'],
      'type': 'none',
      'sources': [
        'gtest/include/gtest/gtest_prod.h',
      ],
    },
  ],
}
# Shared hyper-parameter defaults reused by every experiment grid below.
activate_mse = 1
activate_adaptation_imp = 1
activate_adaptation_d1 = 1
weight_d2 = 1.0
weight_mse = 1.0
refinement = 1
n_epochs_refinement = 10
lambda_regul = [0.01]
lambda_regul_s = [0.01]
threshold_value = [0.95]
# When True, every run is repeated over five seeds (to estimate variance);
# otherwise a single fixed seed is used.
compute_variance = False
random_seed = [1985] if not compute_variance else [1985, 2184, 51, 12, 465]


# Each class below describes one experiment launch configuration: BINARY is
# the script to invoke and GRID maps command-line flag names to lists of
# candidate values (a one-element list pins the flag to a single value).
# NOTE(review): presumably a launcher expands GRID into the cross product of
# flag values — confirm against the launcher that consumes these classes.


class DannMNISTUSPS(object):
    # DANN baseline, MNIST -> USPS.
    MAX_NB_PROCESSES = 3  # maximum concurrent worker processes
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }


class DannIgnoreMNISTUSPS(object):
    # DANN variant adapting only the first part (-adapt_only_first).
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [1],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-adapt_only_first": [1],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }


class DannZeroImputMNISTUSPS(object):
    # DANN with upper_bound disabled and a custom initial learning rate.
    MAX_NB_PROCESSES = 3
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2.5],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }


class DannImputMNISTUSPS(object):
    # DANN with imputation ("dann_imput" mode) and reconstruction weights.
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["dann_imput"],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-stop_grad": [0],
        "-n_epochs": [100],
        "-batch_size": [128],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-weight_d2": [weight_d2],
        "-weight_mse": [weight_mse],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-init_lr": [10 ** -2],
        "-refinement": [refinement],
        "-n_epochs_refinement": [n_epochs_refinement],
        "-lambda_regul": lambda_regul,
        "-lambda_regul_s": lambda_regul_s,
        "-threshold_value": threshold_value,
        "-random_seed": random_seed
    }


class DjdotMNISTUSPS(object):
    # DeepJDOT baseline (note the larger batch size of 500).
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }


class DjdotIgnoreMNISTUSPS(object):
    # DeepJDOT variant adapting only the first part; also emits figures.
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-output_fig": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-adapt_only_first": [1],
        "-random_seed": random_seed
    }


class DjdotZeroImputMNISTUSPS(object):
    # DeepJDOT with upper_bound disabled.
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot"],
        "-upper_bound": [0],
        "-adaptive_lr": [1],
        "-is_balanced": [1],
        "-djdot_alpha": [0.1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-epoch_to_start_align": [11],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-random_seed": random_seed
    }


class DjdotImputMNISTUSPS(object):
    # DeepJDOT with imputation ("djdot_imput" mode) and a bigger reconstructor.
    MAX_NB_PROCESSES = 2
    DEBUG = False
    BINARY = "experiments/launcher/digits_binary.py"
    GRID = {
        "-mode": ["djdot_imput"],
        "-adaptive_lr": [1],
        "-source": ["MNIST"],
        "-target": ["USPS"],
        "-is_balanced": [1],
        "-epoch_to_start_align": [11],
        "-stop_grad": [1],
        "-djdot_alpha": [0.1],
        "-bigger_reconstructor": [1],
        "-n_epochs": [100],
        "-batch_size": [500],
        "-initialize_model": [1],
        "-init_batch_size": [32],
        "-init_lr": [10 ** -2],
        "-activate_mse": [activate_mse],
        "-activate_adaptation_imp": [activate_adaptation_imp],
        "-activate_adaptation_d1": [activate_adaptation_d1],
        "-random_seed": random_seed
    }
# encoding: utf-8 # module cv2.xphoto # from /home/davtoh/anaconda3/envs/rrtools/lib/python3.5/site-packages/cv2.cpython-35m-x86_64-linux-gnu.so # by generator 1.144 # no doc # no imports # Variables with simple values BM3D_STEP1 = 1 BM3D_STEP2 = 2 BM3D_STEPALL = 0 HAAR = 0 INPAINT_SHIFTMAP = 0 __loader__ = None __spec__ = None # functions # real signature unknown; restored from __doc__ def applyChannelGains(src, gainB, gainG, gainR, dst=None): """ applyChannelGains(src, gainB, gainG, gainR[, dst]) -> dst """ pass def bm3dDenoising(src, dstStep1, dstStep2=None, h=None, templateWindowSize=None, searchWindowSize=None, blockMatchingStep1=None, blockMatchingStep2=None, groupSize=None, slidingStep=None, beta=None, normType=None, step=None, transformType=None): # real signature unknown; restored from __doc__ """ bm3dDenoising(src, dstStep1[, dstStep2[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dstStep1, dstStep2 or bm3dDenoising(src[, dst[, h[, templateWindowSize[, searchWindowSize[, blockMatchingStep1[, blockMatchingStep2[, groupSize[, slidingStep[, beta[, normType[, step[, transformType]]]]]]]]]]]]) -> dst """ pass def createGrayworldWB(): # real signature unknown; restored from __doc__ """ createGrayworldWB() -> retval """ pass # real signature unknown; restored from __doc__ def createLearningBasedWB(path_to_model=None): """ createLearningBasedWB([, path_to_model]) -> retval """ pass def createSimpleWB(): # real signature unknown; restored from __doc__ """ createSimpleWB() -> retval """ pass # real signature unknown; restored from __doc__ def dctDenoising(src, dst, sigma, psize=None): """ dctDenoising(src, dst, sigma[, psize]) -> None """ pass def inpaint(src, mask, dst, algorithmType): # real signature unknown; restored from __doc__ """ inpaint(src, mask, dst, algorithmType) -> None """ pass # no classes
# flopy version file automatically created using...pre-commit.py
# created on...March 20, 2018 17:03:11

# Individual version components.
major = 3
minor = 2
micro = 9
build = 60
commit = 2731

# Dotted version strings assembled from the components above.
__version__ = '%d.%d.%d' % (major, minor, micro)
__build__ = '%d.%d.%d.%d' % (major, minor, micro, build)
__git_commit__ = '%d' % commit
#!/usr/bin/env python3


def main(input_path="dnsservers.txt"):
    """Split DNS server names from *input_path* into per-TLD output files.

    Names ending in "org" are appended to org-domain.txt and names ending in
    "com" are appended to com-domain.txt; anything else is ignored.

    Args:
        input_path: file with one server name per line.  Defaults to the
            original hard-coded "dnsservers.txt" so existing behavior is
            unchanged.
    """
    # Map each suffix to the file collecting its matching names.
    routes = {"org": "org-domain.txt", "com": "com-domain.txt"}
    # IMPROVEMENT: open each output file once for the whole run instead of
    # re-opening it in append mode for every matching line.
    outputs = {}
    try:
        with open(input_path, "r") as dnsfile:
            for svr in dnsfile:
                svr = svr.rstrip('\n')  # newline present on all but the last line
                for suffix, out_name in routes.items():
                    if svr.endswith(suffix):
                        if out_name not in outputs:
                            outputs[out_name] = open(out_name, "a")
                        outputs[out_name].write(svr + "\n")
                        break
    finally:
        for handle in outputs.values():
            handle.close()


# IMPROVEMENT: guard the entry point so importing this module no longer
# immediately reads dnsservers.txt (the unguarded call crashed any import
# when the file was absent).
if __name__ == "__main__":
    main()
def count_winning_shops(a, b, shops):
    """Count shops in which both a and b have strictly positive net votes.

    A shop qualifies when it contains strictly more +a than -a entries AND
    strictly more +b than -b entries.

    Args:
        a, b: the two item codes being tallied.
        shops: iterable of lists of ints, one list per shop.

    Returns:
        The number of qualifying shops.
    """
    ans = 0
    for shop in shops:
        # BUG FIX: the original filter read `abs(int(i)) == a or b`, which
        # parses as `(abs(...) == a) or b` and therefore keeps every entry
        # whenever b is truthy.  Filter on membership instead.  (It also
        # shadowed the outer loop variable `i`.)
        votes = [v for v in shop if abs(v) in (a, b)]
        if votes.count(a) > votes.count(-a) and votes.count(b) > votes.count(-b):
            ans += 1
    return ans


def main():
    # Input: "a b" on the first line, the number of shops on the second,
    # then one whitespace-separated list of ints per shop.
    a, b = map(int, input('').split(' '))
    n = int(input(''))
    shops = [[int(token) for token in input('').split(' ')] for _ in range(n)]
    print(count_winning_shops(a, b, shops))


if __name__ == '__main__':
    main()
# Break Statement : greetings = ["Hello","World","!!!"] for x in greetings: print(x) if (x == "World"): break #Breaks the loop when condition matches print() for x in range (0,22,2): if (x == 10): continue #Skips the current iteration when condition matches print(x) input("Press Enter key to exit ")
# Local AMQP (RabbitMQ-style) broker connection settings.
HOST = '127.0.0.1'
USERNAME = 'guest'
PASSWORD = 'guest'
# Full AMQP connection URI; %2F is the URL-encoded default vhost "/".
URI = 'amqp://guest:guest@127.0.0.1:5672/%2F'
# HTTP endpoint on port 15672 (the management API port).
HTTP_URL = 'http://127.0.0.1:15672'
load("@bazel_skylib//lib:paths.bzl", "paths")

def _add_data_impl(ctx):
    """Symlinks `executable` under this target's name and adds `data` runfiles."""
    # Keep the original file extension (e.g. ".exe" on Windows) so the
    # symlink is still a recognizable executable of the same kind.
    (_, extension) = paths.split_extension(ctx.executable.executable.path)
    executable = ctx.actions.declare_file(
        ctx.label.name + extension,
    )
    ctx.actions.symlink(
        output = executable,
        target_file = ctx.executable.executable,
        is_executable = True,
    )
    # Runfiles = the symlink, the real executable, and the extra data files...
    runfiles = ctx.runfiles(files = [executable, ctx.executable.executable] + ctx.files.data)
    # ...plus the wrapped executable's own runfiles...
    runfiles = runfiles.merge(ctx.attr.executable[DefaultInfo].default_runfiles)
    # ...plus the runfiles of every data dependency.
    for data_dep in ctx.attr.data:
        runfiles = runfiles.merge(data_dep[DefaultInfo].default_runfiles)
    return [DefaultInfo(
        executable = executable,
        files = depset(direct = [executable]),
        runfiles = runfiles,
    )]

add_data = rule(
    _add_data_impl,
    attrs = {
        "executable": attr.label(
            executable = True,
            cfg = "target",
            doc = "Create a symlink to this executable",
        ),
        "data": attr.label_list(
            allow_files = True,
            doc = "Add these data files to the executable's runfiles",
        ),
    },
    executable = True,
    doc = "Creates a new target for the given executable with additional runfiles.",
)
# -*- coding: utf-8 -*-
"""
File Name:   missingNumber
Author : jing
Date: 2020/4/13

The missing number in 0..n (0~n-1 中缺失的数字)
https://leetcode-cn.com/problems/que-shi-de-shu-zi-lcof/
"""
# BUG FIX: the original used `List` in the method annotation without
# importing it, which raised NameError the moment the class body executed.
from typing import List


class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Return the single value missing from *nums*.

        *nums* is expected to hold n distinct values drawn from 0..n with
        exactly one value absent; the answer is the difference between the
        full-range sum n*(n+1)//2 and sum(nums).

        Returns 0 for None/empty input (degenerate case kept from the
        original implementation).
        """
        if nums is None or len(nums) == 0:
            return 0
        n = len(nums)
        # Built-in sum() replaces the original manual accumulation loop.
        return n * (n + 1) // 2 - sum(nums)
"""String case-conversion helpers."""


def words_to_snake_case(s):
    """Convert a whitespace-separated phrase to snake_case.

    IMPROVEMENT: splits on any run of whitespace via str.split() — the
    original split(' ') produced empty components (and thus doubled
    underscores) for consecutive spaces, tabs, or leading/trailing blanks.

    >>> words_to_snake_case('Hello World')
    'hello_world'
    """
    return '_'.join(word.lower() for word in s.split())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Find the key of the smallest value in a dictionary.

Implementation: take the (key, value) pair with the smallest value and
return its key.  Ties resolve to the first such key in insertion order.
"""


def key_of_min(d):
    """Return the key whose value is smallest in *d*."""
    smallest_key, _ = min(d.items(), key=lambda pair: pair[1])
    return smallest_key


# Examples
print(key_of_min({'a': 4, 'b': 0, 'c': 13}))

# output:
# b
#What will this script produce? #A: 3 a = 1 a = 2 a = 3 print(a)
# addintersert3.py
def addInterest(balances, rate):
    """Grow every entry of *balances* in place by *rate* (0.05 == 5%).

    Mutates the caller's list; returns None.
    """
    growth = 1 + rate
    for index, balance in enumerate(balances):
        balances[index] = balance * growth


def main():
    """Apply 5% interest to a sample list and display the mutated list."""
    amounts = [1000, 105, 3500, 739]
    rate = 0.05
    addInterest(amounts, rate)
    # The list object was modified through the parameter alias.
    print(amounts)


main()
# -*- coding: utf-8 -*- ABBREVIATIONS = [ 'dr', 'jr', 'mr', 'mrs', 'ms', 'msgr', 'prof', 'sr', 'st'] SUB_PAIRS = [ ('M.', 'Monsieur') ] ALL_PUNC = u"?!?!.,¡()[]¿…‥،;:—。,、:\n" TONE_MARKS = u"?!?!" PERIOD_COMMA = u".," COLON = u":"
# INTERNAL_ONLY_PROPERTIES defines the properties in the config that, while settable, should # not be documented for external users. These will generally be used for internal test or only # given to customers when they have been briefed on the side effects of using them. INTERNAL_ONLY_PROPERTIES = { "__module__", "__doc__", "create_transaction", "SESSION_COOKIE_NAME", "SESSION_COOKIE_HTTPONLY", "SESSION_COOKIE_SAMESITE", "DATABASE_SECRET_KEY", "V22_NAMESPACE_BLACKLIST", "MAXIMUM_CNR_LAYER_SIZE", "OCI_NAMESPACE_WHITELIST", "FEATURE_GENERAL_OCI_SUPPORT", "FEATURE_HELM_OCI_SUPPORT", "FEATURE_NAMESPACE_GARBAGE_COLLECTION", "FEATURE_REPOSITORY_GARBAGE_COLLECTION", "FEATURE_REPOSITORY_ACTION_COUNTER", "APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST", "APP_REGISTRY_SHOW_PACKAGE_CACHE_WHITELIST", "FEATURE_MANIFEST_SIZE_BACKFILL", "TESTING", "SEND_FILE_MAX_AGE_DEFAULT", "DISABLED_FOR_AUDIT_LOGS", "DISABLED_FOR_PULL_LOGS", "FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES", "FEATURE_CLEAR_EXPIRED_RAC_ENTRIES", "ACTION_LOG_MAX_PAGE", "NON_RATE_LIMITED_NAMESPACES", "REPLICATION_QUEUE_NAME", "DOCKERFILE_BUILD_QUEUE_NAME", "CHUNK_CLEANUP_QUEUE_NAME", "SECURITY_SCANNER_ISSUER_NAME", "NOTIFICATION_QUEUE_NAME", "REPOSITORY_GC_QUEUE_NAME", "NAMESPACE_GC_QUEUE_NAME", "EXPORT_ACTION_LOGS_QUEUE_NAME", "SECSCAN_V4_NOTIFICATION_QUEUE_NAME", "FEATURE_BILLING", "BILLING_TYPE", "INSTANCE_SERVICE_KEY_LOCATION", "INSTANCE_SERVICE_KEY_REFRESH", "INSTANCE_SERVICE_KEY_SERVICE", "INSTANCE_SERVICE_KEY_KID_LOCATION", "INSTANCE_SERVICE_KEY_EXPIRATION", "UNAPPROVED_SERVICE_KEY_TTL_SEC", "EXPIRED_SERVICE_KEY_TTL_SEC", "REGISTRY_JWT_AUTH_MAX_FRESH_S", "SERVICE_LOG_ACCOUNT_ID", "BUILDLOGS_OPTIONS", "LIBRARY_NAMESPACE", "STAGGER_WORKERS", "QUEUE_WORKER_METRICS_REFRESH_SECONDS", "PUSH_TEMP_TAG_EXPIRATION_SEC", "GARBAGE_COLLECTION_FREQUENCY", "PAGE_TOKEN_KEY", "BUILD_MANAGER", "JWTPROXY_AUDIENCE", "JWTPROXY_SIGNER", "SECURITY_SCANNER_INDEXING_MIN_ID", "SECURITY_SCANNER_V4_REINDEX_THRESHOLD", "STATIC_SITE_BUCKET", 
"LABEL_KEY_RESERVED_PREFIXES", "TEAM_SYNC_WORKER_FREQUENCY", "JSONIFY_PRETTYPRINT_REGULAR", "TUF_GUN_PREFIX", "LOGGING_LEVEL", "SIGNED_GRANT_EXPIRATION_SEC", "PROMETHEUS_PUSHGATEWAY_URL", "DB_TRANSACTION_FACTORY", "NOTIFICATION_SEND_TIMEOUT", "QUEUE_METRICS_TYPE", "MAIL_FAIL_SILENTLY", "LOCAL_OAUTH_HANDLER", "USE_CDN", "ANALYTICS_TYPE", "LAST_ACCESSED_UPDATE_THRESHOLD_S", "GREENLET_TRACING", "EXCEPTION_LOG_TYPE", "SENTRY_DSN", "SENTRY_PUBLIC_DSN", "BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT", "THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT", "IP_DATA_API_KEY", "SECURITY_SCANNER_ENDPOINT_BATCH", "SECURITY_SCANNER_API_TIMEOUT_SECONDS", "SECURITY_SCANNER_API_TIMEOUT_POST_SECONDS", "SECURITY_SCANNER_ENGINE_VERSION_TARGET", "SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS", "SECURITY_SCANNER_API_VERSION", "REPO_MIRROR_INTERVAL", "DATA_MODEL_CACHE_CONFIG", # TODO: move this into the schema once we support signing in QE. "FEATURE_SIGNING", "TUF_SERVER", "V1_ONLY_DOMAIN", "LOGS_MODEL", "LOGS_MODEL_CONFIG", "APP_REGISTRY_RESULTS_LIMIT", "V3_UPGRADE_MODE", # Deprecated old flag "ACCOUNT_RECOVERY_MODE", } CONFIG_SCHEMA = { "type": "object", "description": "Schema for Quay configuration", "required": [ "PREFERRED_URL_SCHEME", "SERVER_HOSTNAME", "DB_URI", "AUTHENTICATION_TYPE", "DISTRIBUTED_STORAGE_CONFIG", "BUILDLOGS_REDIS", "USER_EVENTS_REDIS", "DISTRIBUTED_STORAGE_PREFERENCE", "DEFAULT_TAG_EXPIRATION", "TAG_EXPIRATION_OPTIONS", ], "properties": { "REGISTRY_STATE": { "type": "string", "description": "The state of the registry.", "enum": ["normal", "readonly"], "x-example": "readonly", }, # Hosting. "PREFERRED_URL_SCHEME": { "type": "string", "description": "The URL scheme to use when hitting Quay. 
If Quay is behind SSL *at all*, this *must* be `https`", "enum": ["http", "https"], "x-example": "https", }, "SERVER_HOSTNAME": { "type": "string", "description": "The URL at which Quay is accessible, without the scheme.", "x-example": "quay.io", }, "EXTERNAL_TLS_TERMINATION": { "type": "boolean", "description": "If TLS is supported, but terminated at a layer before Quay, must be true.", "x-example": True, }, # SSL/TLS. "SSL_CIPHERS": { "type": "array", "description": "If specified, the nginx-defined list of SSL ciphers to enabled and disabled", "x-example": ["CAMELLIA", "!3DES"], "x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers", }, "SSL_PROTOCOLS": { "type": "array", "description": "If specified, the nginx-defined list of SSL protocols to enabled and disabled", "x-example": ["TLSv1.1", "TLSv1.2"], "x-reference": "http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols", }, # User-visible configuration. "REGISTRY_TITLE": { "type": "string", "description": "If specified, the long-form title for the registry. Defaults to `Red Hat Quay`.", "x-example": "Corp Container Service", }, "REGISTRY_TITLE_SHORT": { "type": "string", "description": "If specified, the short-form title for the registry. Defaults to `Red Hat Quay`.", "x-example": "CCS", }, "CONTACT_INFO": { "type": "array", "uniqueItems": True, "description": "If specified, contact information to display on the contact page. 
" + "If only a single piece of contact information is specified, the contact footer will link directly.", "items": [ { "type": "string", "pattern": "^mailto:(.)+$", "x-example": "mailto:admin@example.com", "description": "Adds a link to send an e-mail", }, { "type": "string", "pattern": "^irc://(.)+$", "x-example": "irc://chat.freenode.net:6665/quay", "description": "Adds a link to visit an IRC chat room", }, { "type": "string", "pattern": "^tel:(.)+$", "x-example": "tel:+1-888-930-3475", "description": "Adds a link to call a phone number", }, { "type": "string", "pattern": "^http(s)?://(.)+$", "x-example": "https://twitter.com/quayio", "description": "Adds a link to a defined URL", }, ], }, "SEARCH_RESULTS_PER_PAGE": { "type": "number", "description": "Number of results returned per page by search page. Defaults to 10", "x-example": 10, }, "SEARCH_MAX_RESULT_PAGE_COUNT": { "type": "number", "description": "Maximum number of pages the user can paginate in search before they are limited. Defaults to 10", "x-example": 10, }, # E-mail. "FEATURE_MAILING": { "type": "boolean", "description": "Whether emails are enabled. Defaults to True", "x-example": True, }, "MAIL_SERVER": { "type": "string", "description": "The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true.", "x-example": "smtp.somedomain.com", }, "MAIL_USE_TLS": { "type": "boolean", "description": "If specified, whether to use TLS for sending e-mails.", "x-example": True, }, "MAIL_PORT": { "type": "number", "description": "The SMTP port to use. 
If not specified, defaults to 587.", "x-example": 588, }, "MAIL_USERNAME": { "type": ["string", "null"], "description": "The SMTP username to use when sending e-mails.", "x-example": "myuser", }, "MAIL_PASSWORD": { "type": ["string", "null"], "description": "The SMTP password to use when sending e-mails.", "x-example": "mypassword", }, "MAIL_DEFAULT_SENDER": { "type": ["string", "null"], "description": "If specified, the e-mail address used as the `from` when Quay sends e-mails. If none, defaults to `admin@example.com`.", "x-example": "support@myco.com", }, # Database. "DB_URI": { "type": "string", "description": "The URI at which to access the database, including any credentials.", "x-example": "mysql+pymysql://username:password@dns.of.database/quay", "x-reference": "https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495", }, "DB_CONNECTION_ARGS": { "type": "object", "description": "If specified, connection arguments for the database such as timeouts and SSL.", "properties": { "threadlocals": { "type": "boolean", "description": "Whether to use thread-local connections. Should *ALWAYS* be `true`", }, "autorollback": { "type": "boolean", "description": "Whether to use auto-rollback connections. Should *ALWAYS* be `true`", }, "ssl": { "type": "object", "description": "SSL connection configuration", "properties": { "ca": { "type": "string", "description": "*Absolute container path* to the CA certificate to use for SSL connections", "x-example": "conf/stack/ssl-ca-cert.pem", }, }, "required": ["ca"], }, }, "required": ["threadlocals", "autorollback"], }, "ALLOW_PULLS_WITHOUT_STRICT_LOGGING": { "type": "boolean", "description": "If true, pulls in which the pull audit log entry cannot be written will " + "still succeed. Useful if the database can fallback into a read-only state " + "and it is desired for pulls to continue during that time. Defaults to False.", "x-example": True, }, # Storage. 
"FEATURE_STORAGE_REPLICATION": { "type": "boolean", "description": "Whether to automatically replicate between storage engines. Defaults to False", "x-example": False, }, "FEATURE_PROXY_STORAGE": { "type": "boolean", "description": "Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False", "x-example": False, }, "MAXIMUM_LAYER_SIZE": { "type": "string", "description": "Maximum allowed size of an image layer. Defaults to 20G", "x-example": "100G", "pattern": "^[0-9]+(G|M)$", }, "DISTRIBUTED_STORAGE_CONFIG": { "type": "object", "description": "Configuration for storage engine(s) to use in Quay. Each key is a unique ID" + " for a storage engine, with the value being a tuple of the type and " + " configuration for that engine.", "x-example": { "local_storage": ["LocalStorage", {"storage_path": "some/path/"}], }, "items": { "type": "array", }, }, "DISTRIBUTED_STORAGE_PREFERENCE": { "type": "array", "description": "The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to " + "use. 
A preferred engine means it is first checked for pullig and images are " + "pushed to it.", "items": { "type": "string", "uniqueItems": True, }, "x-example": ["s3_us_east", "s3_us_west"], }, "DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS": { "type": "array", "description": "The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose " + "images should be fully replicated, by default, to all other storage engines.", "items": { "type": "string", "uniqueItems": True, }, "x-example": ["s3_us_east", "s3_us_west"], }, "USERFILES_LOCATION": { "type": "string", "description": "ID of the storage engine in which to place user-uploaded files", "x-example": "s3_us_east", }, "USERFILES_PATH": { "type": "string", "description": "Path under storage in which to place user-uploaded files", "x-example": "userfiles", }, "ACTION_LOG_ARCHIVE_LOCATION": { "type": "string", "description": "If action log archiving is enabled, the storage engine in which to place the " + "archived data.", "x-example": "s3_us_east", }, "ACTION_LOG_ARCHIVE_PATH": { "type": "string", "description": "If action log archiving is enabled, the path in storage in which to place the " + "archived data.", "x-example": "archives/actionlogs", }, "ACTION_LOG_ROTATION_THRESHOLD": { "type": "string", "description": "If action log archiving is enabled, the time interval after which to " + "archive data.", "x-example": "30d", }, "LOG_ARCHIVE_LOCATION": { "type": "string", "description": "If builds are enabled, the storage engine in which to place the " + "archived build logs.", "x-example": "s3_us_east", }, "LOG_ARCHIVE_PATH": { "type": "string", "description": "If builds are enabled, the path in storage in which to place the " + "archived build logs.", "x-example": "archives/buildlogs", }, # Authentication. 
"AUTHENTICATION_TYPE": { "type": "string", "description": "The authentication engine to use for credential authentication.", "x-example": "Database", "enum": ["Database", "LDAP", "JWT", "Keystone", "OIDC", "AppToken"], }, "SUPER_USERS": { "type": "array", "description": "Quay usernames of those users to be granted superuser privileges", "uniqueItems": True, "items": { "type": "string", }, }, "DIRECT_OAUTH_CLIENTID_WHITELIST": { "type": "array", "description": "A list of client IDs of *Quay-managed* applications that are allowed " + "to perform direct OAuth approval without user approval.", "x-reference": "https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html", "uniqueItems": True, "items": { "type": "string", }, }, # Redis. "BUILDLOGS_REDIS": { "type": "object", "description": "Connection information for Redis for build logs caching", "required": ["host"], "properties": { "host": { "type": "string", "description": "The hostname at which Redis is accessible", "x-example": "my.redis.cluster", }, "port": { "type": "number", "description": "The port at which Redis is accessible", "x-example": 1234, }, "password": { "type": "string", "description": "The password to connect to the Redis instance", "x-example": "mypassword", }, }, }, "USER_EVENTS_REDIS": { "type": "object", "description": "Connection information for Redis for user event handling", "required": ["host"], "properties": { "host": { "type": "string", "description": "The hostname at which Redis is accessible", "x-example": "my.redis.cluster", }, "port": { "type": "number", "description": "The port at which Redis is accessible", "x-example": 1234, }, "password": { "type": "string", "description": "The password to connect to the Redis instance", "x-example": "mypassword", }, }, }, # OAuth configuration. 
"GITHUB_LOGIN_CONFIG": { "type": ["object", "null"], "description": "Configuration for using GitHub (Enterprise) as an external login provider", "required": ["CLIENT_ID", "CLIENT_SECRET"], "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-auth.html", "properties": { "GITHUB_ENDPOINT": { "type": "string", "description": "The endpoint of the GitHub (Enterprise) being hit", "x-example": "https://github.com/", }, "API_ENDPOINT": { "type": "string", "description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com", "x-example": "https://api.github.com/", }, "CLIENT_ID": { "type": "string", "description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_TRIGGER_CONFIG", "x-example": "0e8dbe15c4c7630b6780", "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html", }, "CLIENT_SECRET": { "type": "string", "description": "The registered client secret for this Quay instance", "x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846", "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html", }, "ORG_RESTRICT": { "type": "boolean", "description": "If true, only users within the organization whitelist can login using this provider", "x-example": True, }, "ALLOWED_ORGANIZATIONS": { "type": "array", "description": "The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option", "uniqueItems": True, "items": { "type": "string", }, }, }, }, "BITBUCKET_TRIGGER_CONFIG": { "type": ["object", "null"], "description": "Configuration for using BitBucket for build triggers", "required": ["CONSUMER_KEY", "CONSUMER_SECRET"], "x-reference": "https://coreos.com/quay-enterprise/docs/latest/bitbucket-build.html", "properties": { "CONSUMER_KEY": { "type": "string", "description": "The registered consumer key (client ID) for this Quay instance", "x-example": "0e8dbe15c4c7630b6780", }, "CONSUMER_SECRET": { "type": "string", 
"description": "The registered consumer secret (client secret) for this Quay instance", "x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846", }, }, }, "GITHUB_TRIGGER_CONFIG": { "type": ["object", "null"], "description": "Configuration for using GitHub (Enterprise) for build triggers", "required": ["GITHUB_ENDPOINT", "CLIENT_ID", "CLIENT_SECRET"], "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-build.html", "properties": { "GITHUB_ENDPOINT": { "type": "string", "description": "The endpoint of the GitHub (Enterprise) being hit", "x-example": "https://github.com/", }, "API_ENDPOINT": { "type": "string", "description": "The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com", "x-example": "https://api.github.com/", }, "CLIENT_ID": { "type": "string", "description": "The registered client ID for this Quay instance; cannot be shared with GITHUB_LOGIN_CONFIG", "x-example": "0e8dbe15c4c7630b6780", "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html", }, "CLIENT_SECRET": { "type": "string", "description": "The registered client secret for this Quay instance", "x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846", "x-reference": "https://coreos.com/quay-enterprise/docs/latest/github-app.html", }, }, }, "GOOGLE_LOGIN_CONFIG": { "type": ["object", "null"], "description": "Configuration for using Google for external authentication", "required": ["CLIENT_ID", "CLIENT_SECRET"], "properties": { "CLIENT_ID": { "type": "string", "description": "The registered client ID for this Quay instance", "x-example": "0e8dbe15c4c7630b6780", }, "CLIENT_SECRET": { "type": "string", "description": "The registered client secret for this Quay instance", "x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846", }, }, }, "GITLAB_TRIGGER_CONFIG": { "type": ["object", "null"], "description": "Configuration for using Gitlab (Enterprise) for external authentication", "required": ["GITLAB_ENDPOINT", "CLIENT_ID", 
"CLIENT_SECRET"], "properties": { "GITLAB_ENDPOINT": { "type": "string", "description": "The endpoint at which Gitlab(Enterprise) is running", "x-example": "https://gitlab.com", }, "CLIENT_ID": { "type": "string", "description": "The registered client ID for this Quay instance", "x-example": "0e8dbe15c4c7630b6780", }, "CLIENT_SECRET": { "type": "string", "description": "The registered client secret for this Quay instance", "x-example": "e4a58ddd3d7408b7aec109e85564a0d153d3e846", }, }, }, "BRANDING": { "type": ["object", "null"], "description": "Custom branding for logos and URLs in the Quay UI", "required": ["logo"], "properties": { "logo": { "type": "string", "description": "Main logo image URL", "x-example": "/static/img/quay-horizontal-color.svg", }, "footer_img": { "type": "string", "description": "Logo for UI footer", "x-example": "/static/img/RedHat.svg", }, "footer_url": { "type": "string", "description": "Link for footer image", "x-example": "https://redhat.com", }, }, }, "DOCUMENTATION_ROOT": {"type": "string", "description": "Root URL for documentation links"}, # Health. "HEALTH_CHECKER": { "description": "The configured health check.", "x-example": ("RDSAwareHealthCheck", {"access_key": "foo", "secret_key": "bar"}), }, # Metrics. "PROMETHEUS_NAMESPACE": { "type": "string", "description": "The prefix applied to all exposed Prometheus metrics. Defaults to `quay`", "x-example": "myregistry", }, # Misc configuration. "BLACKLIST_V2_SPEC": { "type": "string", "description": "The Docker CLI versions to which Quay will respond that V2 is *unsupported*. Defaults to `<1.6.0`", "x-reference": "http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec", "x-example": "<1.8.0", }, "USER_RECOVERY_TOKEN_LIFETIME": { "type": "string", "description": "The length of time a token for recovering a user accounts is valid. 
Defaults to 30m.", "x-example": "10m", "pattern": "^[0-9]+(w|m|d|h|s)$", }, "SESSION_COOKIE_SECURE": { "type": "boolean", "description": "Whether the `secure` property should be set on session cookies. " + "Defaults to False. Recommended to be True for all installations using SSL.", "x-example": True, "x-reference": "https://en.wikipedia.org/wiki/Secure_cookies", }, "PUBLIC_NAMESPACES": { "type": "array", "description": "If a namespace is defined in the public namespace list, then it will appear on *all*" + " user's repository list pages, regardless of whether that user is a member of the namespace." + ' Typically, this is used by an enterprise customer in configuring a set of "well-known"' + " namespaces.", "uniqueItems": True, "items": { "type": "string", }, }, "AVATAR_KIND": { "type": "string", "description": "The types of avatars to display, either generated inline (local) or Gravatar (gravatar)", "enum": ["local", "gravatar"], }, "V2_PAGINATION_SIZE": { "type": "number", "description": "The number of results returned per page in V2 registry APIs", "x-example": 100, }, "ENABLE_HEALTH_DEBUG_SECRET": { "type": ["string", "null"], "description": "If specified, a secret that can be given to health endpoints to see full debug info when" + "not authenticated as a superuser", "x-example": "somesecrethere", }, "BROWSER_API_CALLS_XHR_ONLY": { "type": "boolean", "description": "If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True.", "x-example": False, }, # Time machine and tag expiration settings. "FEATURE_CHANGE_TAG_EXPIRATION": { "type": "boolean", "description": "Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True.", "x-example": False, }, "DEFAULT_TAG_EXPIRATION": { "type": "string", "description": "The default, configurable tag expiration time for time machine. 
Defaults to `2w`.", "pattern": "^[0-9]+(w|m|d|h|s)$", }, "TAG_EXPIRATION_OPTIONS": { "type": "array", "description": "The options that users can select for expiration of tags in their namespace (if enabled)", "items": { "type": "string", "pattern": "^[0-9]+(w|m|d|h|s)$", }, }, # Team syncing. "FEATURE_TEAM_SYNCING": { "type": "boolean", "description": "Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone)", "x-example": True, }, "TEAM_RESYNC_STALE_TIME": { "type": "string", "description": "If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m)", "x-example": "2h", "pattern": "^[0-9]+(w|m|d|h|s)$", }, "FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP": { "type": "boolean", "description": "If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False.", "x-example": True, }, # Security scanning. "FEATURE_SECURITY_SCANNER": { "type": "boolean", "description": "Whether to turn of/off the security scanner. Defaults to False", "x-example": False, "x-reference": "https://coreos.com/quay-enterprise/docs/latest/security-scanning.html", }, "FEATURE_SECURITY_NOTIFICATIONS": { "type": "boolean", "description": "If the security scanner is enabled, whether to turn of/off security notificaitons. Defaults to False", "x-example": False, }, "SECURITY_SCANNER_ENDPOINT": { "type": "string", "pattern": "^http(s)?://(.)+$", "description": "The endpoint for the V2 security scanner", "x-example": "http://192.168.99.101:6060", }, "SECURITY_SCANNER_V4_ENDPOINT": { "type": ["string", "null"], "pattern": "^http(s)?://(.)+$", "description": "The endpoint for the V4 security scanner", "x-example": "http://192.168.99.101:6060", }, "SECURITY_SCANNER_INDEXING_INTERVAL": { "type": "number", "description": "The number of seconds between indexing intervals in the security scanner. 
Defaults to 30.", "x-example": 30, }, "SECURITY_SCANNER_V4_PSK": { "type": "string", "description": "A base64 encoded string used to sign JWT(s) on Clair V4 requests. If 'None' jwt signing will not occur.", "x-example": "PSK", }, # Repository mirroring "REPO_MIRROR_INTERVAL": { "type": "number", "description": "The number of seconds between checking for repository mirror candidates. Defaults to 30.", "x-example": 30, }, # Build "FEATURE_GITHUB_BUILD": { "type": "boolean", "description": "Whether to support GitHub build triggers. Defaults to False", "x-example": False, }, "FEATURE_BITBUCKET_BUILD": { "type": "boolean", "description": "Whether to support Bitbucket build triggers. Defaults to False", "x-example": False, }, "FEATURE_GITLAB_BUILD": { "type": "boolean", "description": "Whether to support GitLab build triggers. Defaults to False", "x-example": False, }, "FEATURE_BUILD_SUPPORT": { "type": "boolean", "description": "Whether to support Dockerfile build. Defaults to True", "x-example": True, }, "DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT": { "type": ["number", "null"], "description": "If not None, the default maximum number of builds that can be queued in a namespace.", "x-example": 20, }, "SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD": { "type": ["number", "null"], "description": "If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled. Defaults to 5.", "x-example": 10, }, "SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD": { "type": ["number", "null"], "description": "If not None, the number of successive failures that can occur before a build trigger is automatically disabled. Defaults to 100.", "x-example": 50, }, # Nested repository names "FEATURE_EXTENDED_REPOSITORY_NAMES": { "type": "boolean", "description": "Whether repository names can have nested paths (/)", "x-example": False, }, # Login "FEATURE_GITHUB_LOGIN": { "type": "boolean", "description": "Whether GitHub login is supported. 
Defaults to False", "x-example": False, }, "FEATURE_GOOGLE_LOGIN": { "type": "boolean", "description": "Whether Google login is supported. Defaults to False", "x-example": False, }, # Recaptcha "FEATURE_RECAPTCHA": { "type": "boolean", "description": "Whether Recaptcha is necessary for user login and recovery. Defaults to False", "x-example": False, "x-reference": "https://www.google.com/recaptcha/intro/", }, "RECAPTCHA_SITE_KEY": { "type": ["string", "null"], "description": "If recaptcha is enabled, the site key for the Recaptcha service", }, "RECAPTCHA_SECRET_KEY": { "type": ["string", "null"], "description": "If recaptcha is enabled, the secret key for the Recaptcha service", }, # External application tokens. "FEATURE_APP_SPECIFIC_TOKENS": { "type": "boolean", "description": "If enabled, users can create tokens for use by the Docker CLI. Defaults to True", "x-example": False, }, "APP_SPECIFIC_TOKEN_EXPIRATION": { "type": ["string", "null"], "description": "The expiration for external app tokens. Defaults to None.", "pattern": "^[0-9]+(w|m|d|h|s)$", }, "EXPIRED_APP_SPECIFIC_TOKEN_GC": { "type": ["string", "null"], "description": "Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d.", "pattern": "^[0-9]+(w|m|d|h|s)$", }, # Feature Flag: Garbage collection. "FEATURE_GARBAGE_COLLECTION": { "type": "boolean", "description": "Whether garbage collection of repositories is enabled. Defaults to True", "x-example": False, }, # Feature Flag: Rate limits. "FEATURE_RATE_LIMITS": { "type": "boolean", "description": "Whether to enable rate limits on API and registry endpoints. Defaults to False", "x-example": True, }, # Feature Flag: Aggregated log retrieval. "FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL": { "type": "boolean", "description": "Whether to allow retrieval of aggregated log counts. Defaults to True", "x-example": True, }, # Feature Flag: Log export. 
"FEATURE_LOG_EXPORT": { "type": "boolean", "description": "Whether to allow exporting of action logs. Defaults to True", "x-example": True, }, # Feature Flag: User last accessed. "FEATURE_USER_LAST_ACCESSED": { "type": "boolean", "description": "Whether to record the last time a user was accessed. Defaults to True", "x-example": True, }, # Feature Flag: Permanent Sessions. "FEATURE_PERMANENT_SESSIONS": { "type": "boolean", "description": "Whether sessions are permanent. Defaults to True", "x-example": True, }, # Feature Flag: Super User Support. "FEATURE_SUPER_USERS": { "type": "boolean", "description": "Whether super users are supported. Defaults to True", "x-example": True, }, # Feature Flag: Use FIPS compliant cryptography. "FEATURE_FIPS": { "type": "boolean", "description": "If set to true, Quay will run using FIPS compliant hash functions. Defaults to False", "x-example": True, }, # Feature Flag: Anonymous Users. "FEATURE_ANONYMOUS_ACCESS": { "type": "boolean", "description": " Whether to allow anonymous users to browse and pull public repositories. Defaults to True", "x-example": True, }, # Feature Flag: User Creation. "FEATURE_USER_CREATION": { "type": "boolean", "description": "Whether users can be created (by non-super users). Defaults to True", "x-example": True, }, # Feature Flag: Invite Only User Creation. "FEATURE_INVITE_ONLY_USER_CREATION": { "type": "boolean", "description": "Whether users being created must be invited by another user. Defaults to False", "x-example": False, }, # Feature Flag: Encrypted Basic Auth. "FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH": { "type": "boolean", "description": "Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False", "x-example": False, }, # Feature Flag: Direct Login. "FEATURE_DIRECT_LOGIN": { "type": "boolean", "description": "Whether users can directly login to the UI. Defaults to True", "x-example": True, }, # Feature Flag: Advertising V2. 
"FEATURE_ADVERTISE_V2": { "type": "boolean", "description": "Whether the v2/ endpoint is visible. Defaults to True", "x-example": True, }, # Feature Flag: Log Rotation. "FEATURE_ACTION_LOG_ROTATION": { "type": "boolean", "description": "Whether or not to rotate old action logs to storage. Defaults to False", "x-example": False, }, # Feature Flag: ACI Conversion. "FEATURE_ACI_CONVERSION": { "type": "boolean", "description": "Whether to enable conversion to ACIs. Defaults to False", "x-example": False, }, # Feature Flag: Library Support. "FEATURE_LIBRARY_SUPPORT": { "type": "boolean", "description": 'Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True', "x-example": True, }, # Feature Flag: Require Team Invite. "FEATURE_REQUIRE_TEAM_INVITE": { "type": "boolean", "description": "Whether to require invitations when adding a user to a team. Defaults to True", "x-example": True, }, # Feature Flag: Collecting and Supporting Metadata. "FEATURE_USER_METADATA": { "type": "boolean", "description": "Whether to collect and support user metadata. Defaults to False", "x-example": False, }, # Feature Flag: Support App Registry. "FEATURE_APP_REGISTRY": { "type": "boolean", "description": "Whether to enable support for App repositories. Defaults to False", "x-example": False, }, # Feature Flag: Read only app registry. "FEATURE_READONLY_APP_REGISTRY": { "type": "boolean", "description": "Whether to App repositories are read-only. Defaults to False", "x-example": True, }, # Feature Flag: Public Reposiotires in _catalog Endpoint. "FEATURE_PUBLIC_CATALOG": { "type": "boolean", "description": "If set to true, the _catalog endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False", "x-example": False, }, # Feature Flag: Reader Build Logs. 
"FEATURE_READER_BUILD_LOGS": { "type": "boolean", "description": "If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False", "x-example": False, }, # Feature Flag: Usernames Autocomplete. "FEATURE_PARTIAL_USER_AUTOCOMPLETE": { "type": "boolean", "description": "If set to true, autocompletion will apply to partial usernames. Defaults to True", "x-example": True, }, # Feature Flag: User log access. "FEATURE_USER_LOG_ACCESS": { "type": "boolean", "description": "If set to true, users will have access to audit logs for their namespace. Defaults to False", "x-example": True, }, # Feature Flag: User renaming. "FEATURE_USER_RENAME": { "type": "boolean", "description": "If set to true, users can rename their own namespace. Defaults to False", "x-example": True, }, # Feature Flag: Username confirmation. "FEATURE_USERNAME_CONFIRMATION": { "type": "boolean", "description": "If set to true, users can confirm their generated usernames. Defaults to True", "x-example": False, }, # Feature Flag: V1 push restriction. "FEATURE_RESTRICTED_V1_PUSH": { "type": "boolean", "description": "If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. Defaults to True", "x-example": False, }, # Feature Flag: Support Repository Mirroring. "FEATURE_REPO_MIRROR": { "type": "boolean", "description": "Whether to enable support for repository mirroring. Defaults to False", "x-example": False, }, "REPO_MIRROR_TLS_VERIFY": { "type": "boolean", "description": "Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True", "x-example": True, }, "REPO_MIRROR_SERVER_HOSTNAME": { "type": ["string", "null"], "description": "Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset", "x-example": "openshift-quay-service", }, # Feature Flag: V1 push restriction. 
"V1_PUSH_WHITELIST": { "type": "array", "description": "The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true.", "x-example": ["some", "namespaces"], }, # Logs model "LOGS_MODEL": { "type": "string", "description": "Logs model for action logs", "enum": ["database", "transition_reads_both_writes_es", "elasticsearch"], "x-example": "database", }, "LOGS_MODEL_CONFIG": { "type": "object", "description": "Logs model config for action logs", "x-reference": "https://www.elastic.co/guide/en/elasticsearch/guide/master/_index_settings.html", "properties": { "producer": { "type": "string", "description": "Logs producer if logging to Elasticsearch", "enum": ["kafka", "elasticsearch", "kinesis_stream"], "x-example": "kafka", }, "elasticsearch_config": { "type": "object", "description": "Elasticsearch cluster configuration", "properties": { "host": { "type": "string", "description": "Elasticsearch cluster endpoint", "x-example": "host.elasticsearch.example", }, "port": { "type": "number", "description": "Elasticsearch cluster endpoint port", "x-example": 1234, }, "access_key": { "type": "string", "description": "Elasticsearch user (or IAM key for AWS ES)", "x-example": "some_string", }, "secret_key": { "type": "string", "description": "Elasticsearch password (or IAM secret for AWS ES)", "x-example": "some_secret_string", }, "aws_region": { "type": "string", "description": "Amazon web service region", "x-example": "us-east-1", }, "use_ssl": { "type": "boolean", "description": "Use ssl for Elasticsearch. 
Defaults to True", "x-example": True, }, "index_prefix": { "type": "string", "description": "Elasticsearch's index prefix", "x-example": "logentry_", }, "index_settings": { "type": "object", "description": "Elasticsearch's index settings", }, }, }, "kafka_config": { "type": "object", "description": "Kafka cluster configuration", "properties": { "bootstrap_servers": { "type": "array", "description": "List of Kafka brokers to bootstrap the client from", "uniqueItems": True, "items": { "type": "string", }, }, "topic": { "type": "string", "description": "Kafka topic to publish log entries to", "x-example": "logentry", }, "max_block_seconds": { "type": "number", "description": "Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable", "x-example": 10, }, }, }, "kinesis_stream_config": { "type": "object", "description": "AWS Kinesis Stream configuration", "properties": { "stream_name": { "type": "string", "description": "Kinesis stream to send action logs to", "x-example": "logentry-kinesis-stream", }, "aws_region": { "type": "string", "description": "AWS region", "x-example": "us-east-1", }, "aws_access_key": { "type": "string", "description": "AWS access key", "x-example": "some_access_key", }, "aws_secret_key": { "type": "string", "description": "AWS secret key", "x-example": "some_secret_key", }, "connect_timeout": { "type": "number", "description": "Number of seconds before timeout when attempting to make a connection", "x-example": 5, }, "read_timeout": { "type": "number", "description": "Number of seconds before timeout when reading from a connection", "x-example": 5, }, "retries": { "type": "number", "description": "Max number of attempts made on a single request", "x-example": 5, }, "max_pool_connections": { "type": "number", "description": "The maximum number of connections to keep in a connection pool", "x-example": 10, }, }, }, }, }, # Feature Flag: Blacklist Email Domains "FEATURE_BLACKLISTED_EMAILS": { 
"type": "boolean", "description": "If set to true, no new User accounts may be created if their email domain is blacklisted.", "x-example": False, }, # Blacklisted Email Domains "BLACKLISTED_EMAIL_DOMAINS": { "type": "array", "description": "The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true.", "x-example": ["example.com", "example.org"], }, "FRESH_LOGIN_TIMEOUT": { "type": "string", "description": "The time after which a fresh login requires users to reenter their password", "x-example": "5m", }, # Webhook blacklist. "WEBHOOK_HOSTNAME_BLACKLIST": { "type": "array", "description": "The set of hostnames to disallow from webhooks when validating, beyond localhost", "x-example": ["somexternaldomain.com"], }, "CREATE_PRIVATE_REPO_ON_PUSH": { "type": "boolean", "description": "Whether new repositories created by push are set to private visibility. Defaults to True.", "x-example": True, }, "CREATE_NAMESPACE_ON_PUSH": { "type": "boolean", "description": "Whether new push to a non-existent organization creates it. Defaults to False.", "x-example": False, }, # Allow first user to be initialized via API "FEATURE_USER_INITIALIZE": { "type": "boolean", "description": "If set to true, the first User account may be created via API /api/v1/user/initialize", "x-example": False, }, # OCI artifact types "ALLOWED_OCI_ARTIFACT_TYPES": { "type": "object", "description": "The set of allowed OCI artifact mimetypes and the assiciated layer types", "x-example": { "application/vnd.cncf.helm.config.v1+json": ["application/tar+gzip"], "application/vnd.sylabs.sif.config.v1+json": [ "application/vnd.sylabs.sif.layer.v1.sif" ], }, }, }, }
# example file for submodule imports
def divide_me_by_2(x):
    """Return half of *x* using true division (ints come back as floats)."""
    half = x / 2
    return half
class Solution:
    def connect(self, root):
        """Link each node's ``next`` pointer to its right neighbour on the
        same level and return the (mutated) root.

        Uses two ping-pong lists: ``nodes[x]`` holds the level currently
        being linked, ``nodes[1 - x]`` collects the children for the next
        level.

        NOTE(review): the loop condition only inspects the *first* entry of
        each level, and children are appended without filtering ``None`` —
        so this assumes a perfect binary tree (LeetCode 116 style) where a
        level is either fully populated or all-``None``. A non-perfect tree
        whose first entry on some level is ``None`` would stop early or
        dereference ``None`` — confirm the input contract before reuse.
        """
        nodes = [[root], []]
        x = 0  # index of the level currently being linked
        while (nodes[0] and nodes[0][0]) or (nodes[1] and nodes[1][0]):
            for i in range(len(nodes[x])):
                # Last node on the level points at None; others at their right sibling.
                nodes[x][i].next = None if i == len(nodes[x]) - 1 else nodes[x][i+1]
                # Queue both children (possibly None) for the next level.
                nodes[(1 + x) % 2].append(nodes[x][i].left)
                nodes[(1 + x) % 2].append(nodes[x][i].right)
            nodes[x] = []
            x = (1 + x) % 2
        return root
CELERY_TIMEZONE = 'Europe/Rome' # The backend used to store task results CELERY_RESULT_BACKEND = 'rpc://' # If set to True, result messages will be persistent. This means the messages will not be lost after a broker restart CELERY_RESULT_PERSISTENT = True CELERY_ACCEPT_CONTENT=['json', 'pickle'] CELERY_TASK_SERIALIZER='json' CELERY_RESULT_SERIALIZER='json' # Broker settings. BROKER_URL = 'amqp://guest:guest@localhost:5672//' BROKER_HEARTBEAT = 10.0 BROKER_HEARTBEAT_CHECKRATE = 2.0 CELERY_IMPORTS = [ 'app_celery.tasks' ] CELERYD_STATE_DB = '/var/celery/db/state' # Enables error emails. CELERY_SEND_TASK_ERROR_EMAILS = True # Name and email addresses of recipients ADMINS = ( ('Administrator Name', 'admin@somedoamin.net'), ) # Email address used as sender (From field). SERVER_EMAIL = 'no-reply@somedomain.net' # Mailserver configuration EMAIL_HOST = 'localhost' EMAIL_PORT = 25 # Send events so the worker can be monitored by tools like celerymon. CELERY_SEND_EVENTS = True # If enabled the worker pool can be restarted using the pool_restart remote control command. CELERYD_POOL_RESTARTS = True
def checkIfMessagerIsBooster(self, user):
    """Return True when *user* carries the bot's configured booster role.

    Called by the Robot class.

    :param self: Robot instance (provides ``boostedRole``)
    :param user: Discord.User instance (provides ``roles``)
    :return: True if the user is a booster, else False
    """
    # ``in`` performs the same equality scan the old explicit loop did.
    return self.boostedRole in user.roles
class InputBroker:
    """Abstract base class that supplies raw input values used when
    considering scores."""

    def get_input_value(self, consideration, context):
        """Return the raw value for *consideration* given *context*.

        Concrete brokers must override this hook; the base implementation
        always raises.
        """
        raise NotImplementedError()
# Colour coding for rows(...)
def colornumber(color):
    """Map the row-colour letters 'd'..'l' to indices 0..8; anything else
    (other strings, longer strings, non-strings) maps to 9."""
    # The ten-way elif chain collapses to character arithmetic: the letters
    # are consecutive, so their offset from 'd' is the code.
    if isinstance(color, str) and len(color) == 1 and 'd' <= color <= 'l':
        return ord(color) - ord('d')
    return 9
class Solution:
    def isIdealPermutation(self, A):
        """
        :type A: List[int]
        :rtype: bool

        A permutation is "ideal" when every global inversion is also a local
        inversion. Equivalently: no element may be smaller than the running
        maximum of everything at least two positions to its left.
        """
        running_max = 0
        for j in range(2, len(A)):
            # Maximum over A[0..j-2], i.e. everything that would form a
            # non-adjacent (global-only) inversion with A[j].
            running_max = max(running_max, A[j - 2])
            if running_max > A[j]:
                return False
        return True
# Fixture-style list of 2018 US (en-US) holiday records. Each record holds an
# ISO date, description, locale, free-form notes, a region ('' = nationwide,
# otherwise a state code), and a 'type' code.
# NOTE(review): the 'type' codes (NF, NV, V, NRF) are not defined in this
# chunk — presumably encodings like national/fixed/variable; confirm against
# the consumer of this data before relying on them.
[
    {'date': '2018-01-01', 'description': "New Year's Day", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'},
    {'date': '2018-01-15', 'description': 'Birthday of Martin Luther King, Jr.', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-02-19', 'description': "Washington's Birthday", 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'MA', 'type': 'V'},
    {'date': '2018-04-16', 'description': "Patriots' Day", 'locale': 'en-US', 'notes': '', 'region': 'ME', 'type': 'V'},
    {'date': '2018-05-28', 'description': 'Memorial Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-07-04', 'description': 'Independence Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'},
    {'date': '2018-09-03', 'description': 'Labor Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-10-08', 'description': 'Columbus Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-11-11', 'description': 'Veterans Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NF'},
    {'date': '2018-11-22', 'description': 'Thanksgiving Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-11-23', 'description': 'Day after Thanksgiving', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NV'},
    {'date': '2018-12-24', 'description': 'Christmas Eve', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'},
    {'date': '2018-12-25', 'description': 'Christmas Day', 'locale': 'en-US', 'notes': '', 'region': '', 'type': 'NRF'}
]
# A function to get the desired metrics while working with multiple model training procedures
def print_classification_metrics(y_train, train_pred, y_test, test_pred, return_performance=True):
    """Print common train/test classification metrics and optionally return them.

    Relies on scikit-learn's ``accuracy_score``/``f1_score``/``precision_score``/
    ``recall_score`` being imported at module level.

    :param y_train: ground-truth training labels
    :param train_pred: model predictions on the training set
    :param y_test: ground-truth test labels
    :param test_pred: model predictions on the test set
    :param return_performance: when True (default), return the metrics dict
    :returns: dict of metric name -> value, or None
    """
    # Keys keep their trailing ': ' on purpose — callers may already index
    # the returned dict by these exact strings.
    dict_performance = {'Training Accuracy: ': accuracy_score(y_train, train_pred),
                        'Training f1-score: ': f1_score(y_train, train_pred),
                        'Accuracy: ': accuracy_score(y_test, test_pred),
                        'Precision: ': precision_score(y_test, test_pred),
                        'Recall: ': recall_score(y_test, test_pred),
                        'f1-score: ': f1_score(y_test, test_pred)}
    # Bug fix: the old "{} : {}" format added a second colon after keys that
    # already end with ': ' ("Training Accuracy:  : 0.97").
    for key, value in dict_performance.items():
        print("{}{}".format(key, value))
    if return_performance:
        return dict_performance
#!/usr/bin/env python


def part_one(values: list[int]) -> int:
    """Count strictly increasing consecutive pairs of measurements."""
    return sum(later > earlier for earlier, later in zip(values, values[1:]))


def part_two(values: list[int]) -> int:
    """Count strictly increasing consecutive three-measurement window sums."""
    windows = [a + b + c for a, b, c in zip(values, values[1:], values[2:])]
    return sum(later > earlier for earlier, later in zip(windows, windows[1:]))


if __name__ == '__main__':
    values_: list[int] = [int(row) for row in open("../../../input.txt").readlines()]
    print(part_one(values=values_))
    print(part_two(values=values_))
"""Top-level package for Client 1C for Time Sheet.""" __author__ = """Nick K Sabinin""" __email__ = 'sabnk@optictelecom.ru' __version__ = '0.1.0'
# -*- coding: utf-8 -*- { 'name': "programme_fidelite", 'summary': """ fidelite programme """, 'description': """ programme de fidelité """, 'author': "emmanuel.kissi@progistack.com", 'website': "http://www.yourcompany.com", 'category': 'Uncategorized', 'version': '0.1', # any module necessary for this one to work correctly 'depends': ['base', 'point_of_sale', 'stock'], # always loaded 'data': [ 'security/ir.model.access.csv', 'views/programme_fidelite_view.xml', 'views/pos_template.xml', 'views/loyalty_field.xml' ], # only loaded in demonstration mode 'demo': [ ], 'qweb': [ 'static/src/xml/reward_button.xml', 'static/src/xml/loyalty_field1.xml', 'static/src/xml/show_loyalty_popup.xml', 'static/src/xml/loyalty_receipt.xml', ], }
# Usage: gunicorn ProductCatalog.wsgi --bind 0.0.0.0:$PORT --config deploy/gunicorn.conf.py # Max number of pending connections. backlog = 1024 # Number of workers spawned for request handling. workers = 1 # Standard type of workers. worker_class = 'sync' # Kill worker if it does not notify the master process in this number of seconds. timeout = 30 # Log file location. logfile = '/var/log/productcatalog-gunicorn.log' # The granularity of log output. loglevel = 'info'
# NOTE: This objects are used directly in the external-notification-data and vulnerability-service
# on the frontend, so be careful with changing their existing keys.
PRIORITY_LEVELS = {
    "Unknown": {
        "title": "Unknown",
        "value": "Unknown",
        "index": 5,
        "level": "info",
        "color": "#9B9B9B",
        "score": 0,
        "description": "Unknown is either a security problem that has not been assigned to a priority"
        + " yet or a priority that our system did not recognize",
        "banner_required": False,
    },
    "Negligible": {
        "title": "Negligible",
        "value": "Negligible",
        "index": 4,
        "level": "info",
        "color": "#9B9B9B",
        "score": 1,
        "description": "Negligible is technically a security problem, but is only theoretical "
        + "in nature, requires a very special situation, has almost no install base, "
        + "or does no real damage.",
        "banner_required": False,
    },
    "Low": {
        "title": "Low",
        "value": "Low",
        "index": 3,
        "level": "warning",
        "color": "#F8CA1C",
        "score": 3,
        "description": "Low is a security problem, but is hard to exploit due to environment, "
        + "requires a user-assisted attack, a small install base, or does very little"
        + " damage.",
        "banner_required": False,
    },
    "Medium": {
        "title": "Medium",
        "value": "Medium",
        "index": 2,
        "level": "warning",
        "color": "#FCA657",
        "score": 6,
        "description": "Medium is a real security problem, and is exploitable for many people. "
        + "Includes network daemon denial of service attacks, cross-site scripting, and "
        + "gaining user privileges.",
        "banner_required": False,
    },
    "High": {
        "title": "High",
        "value": "High",
        "index": 1,
        "level": "warning",
        "color": "#F77454",
        "score": 9,
        "description": "High is a real problem, exploitable for many people in a default "
        + "installation. Includes serious remote denial of services, local root "
        + "privilege escalations, or data loss.",
        "banner_required": False,
    },
    "Critical": {
        "title": "Critical",
        "value": "Critical",
        "index": 0,
        "level": "error",
        "color": "#D64456",
        "score": 10,
        # Typo fix in user-facing text: "a installation" -> "an installation".
        "description": "Critical is a world-burning problem, exploitable for nearly all people in "
        + "an installation of the package. Includes remote root privilege escalations, "
        + "or massive data loss.",
        "banner_required": False,
    },
}


def get_priority_for_index(index):
    """Map a numeric severity index (int or numeric string) to its priority name.

    Returns "Unknown" for anything that does not parse as an int or does not
    match any known index.
    """
    try:
        int_index = int(index)
    except (TypeError, ValueError):
        # Bug fix: int(None) raises TypeError, which the old bare
        # ``except ValueError`` did not catch and therefore crashed on.
        return "Unknown"

    for priority, data in PRIORITY_LEVELS.items():
        if data["index"] == int_index:
            return priority
    return "Unknown"


def get_priority_from_cvssscore(score):
    """Translate a CVSS base score into a priority-level value.

    Follows the NVD v3 qualitative scale: (0, 4) Low, [4, 7) Medium,
    [7, 9) High, [9, 10] Critical. A score of 0 or non-numeric input
    yields "Unknown".
    """
    try:
        if 0 < score < 4:
            return PRIORITY_LEVELS["Low"]["value"]
        if 4 <= score < 7:
            return PRIORITY_LEVELS["Medium"]["value"]
        if 7 <= score < 9:
            return PRIORITY_LEVELS["High"]["value"]
        # Bug fix: the upper bound is inclusive — a maximal CVSS score of
        # exactly 10.0 previously fell through and came back as "Unknown".
        if 9 <= score <= 10:
            return PRIORITY_LEVELS["Critical"]["value"]
    except TypeError:
        # Bug fix: comparing a non-number (e.g. None) raises TypeError, not
        # ValueError as the old handler assumed, so it was dead code.
        return "Unknown"
    return "Unknown"


def fetch_vuln_severity(vuln, enrichments):
    """Best-effort severity for a vulnerability record.

    Prefers the scanner's normalized severity; falls back to deriving one
    from the enrichment's CVSS base score; otherwise returns "Unknown".
    """
    if (
        vuln["normalized_severity"]
        and vuln["normalized_severity"] != PRIORITY_LEVELS["Unknown"]["value"]
    ):
        return vuln["normalized_severity"]

    if enrichments.get(vuln["id"], {}).get("baseScore", None):
        return get_priority_from_cvssscore(enrichments[vuln["id"]]["baseScore"])

    return PRIORITY_LEVELS["Unknown"]["value"]
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def sumEvenGrandparent(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Sum of all node values whose grandparent has an even value
        (LeetCode 1315). A one-element list is used as a mutable
        accumulator shared across the recursion.
        """
        total = [0]
        self.traverse(root, None, None, total)
        return total[0]

    def traverse(self, node, pnode, gpnode, sum_even):
        """Depth-first walk carrying the parent and grandparent of *node*."""
        if node is None:
            return
        if gpnode is not None and gpnode.val % 2 == 0:
            sum_even[0] += node.val
        # For each child, the current node becomes the parent and the
        # current parent becomes the grandparent.
        for child in (node.left, node.right):
            if child:
                self.traverse(child, node, pnode, sum_even)


def test_sum_even_grand_parent():
    # Sample tree from LeetCode 1315; expected answer is 18.
    root = TreeNode(6)
    root.left = TreeNode(7)
    root.right = TreeNode(8)
    root.left.left = TreeNode(2)
    root.left.right = TreeNode(7)
    root.right.left = TreeNode(1)
    root.right.right = TreeNode(3)
    root.left.left.left = TreeNode(9)
    root.left.right.left = TreeNode(1)
    root.left.right.right = TreeNode(4)
    root.right.right.right = TreeNode(5)
    assert 18 == Solution().sumEvenGrandparent(root)
class solution:
    def findNumbers(self, nums=()):
        """Count how many numbers in *nums* have an even number of digits.

        Bug fix: the default argument was a mutable list (``nums=[]``);
        an immutable empty tuple gives identical behaviour for every
        caller without the shared-mutable-default pitfall.

        :param nums: iterable of integers (default: empty)
        :returns: count of values whose decimal representation has even length
        """
        even = 0
        for num in nums:
            if len(str(num)) % 2 == 0:
                even += 1
        return even


if __name__ == "__main__":
    sol = solution()
    _ = [int(n) for n in input().split()]
    print(sol.findNumbers(_))
# Compute the arithmetic mean of a fixed series of pressure readings.
pressure_arr = [80, 90, 100, 150, 120, 110, 160, 110, 100]

# Bug fix: the accumulator was named ``sum``, shadowing the builtin of the
# same name for the rest of the module; renamed to ``total``.
total = 0
for pressure in pressure_arr:
    total = pressure + total

length = len(pressure_arr)
mean = total / length
print("The mean is", mean)
# Reads a count (unused after parsing) and a list of family sizes from stdin,
# then prints the first value in sorted order that differs from both of its
# neighbours, falling back to the last element when no such value is found.
n = int(input())
families = map(int, input().split())
families = sorted(families)
for i in range(len(families)):
    if(i!=len(families)-1):
        # NOTE(review): when i == 0 the "left neighbour" families[i - 1] is
        # families[-1], i.e. the *last* element. On a sorted list this still
        # yields the intended answer whenever the first element is unique
        # (first != last unless all values are equal), but it is fragile —
        # confirm the intended semantics before reusing this pattern.
        if(families[i]!=families[i - 1] and families[i]!=families[i + 1]):
            print(families[i])
            break
    else:
        # Reached the final element without finding a neighbour-distinct value.
        print(families[i])
class Solution:
    def expand(self, S: str) -> List[str]:
        """Brace expansion: expand option groups like "{a,b}c{d,e}" into every
        word they can represent, returned in sorted order (LeetCode 1087).

        NOTE(review): the ``List`` annotation relies on ``from typing import
        List`` at the top of the file, which is outside this chunk — confirm
        it is present.
        """
        return sorted(self.dfs(S, ['']))

    def dfs(self, s, prev):
        """Expand the remaining suffix *s* onto every partial word in *prev*.

        Each call consumes the literal prefix of *s* plus (at most) the
        first ``{...}`` group, then recurses on the rest of the string.
        """
        # Base case: nothing left to consume — the partial words are final.
        if not s:
            return prev
        n = len(s)
        cur = ''        # literal characters preceding the next '{'
        found = False   # whether a '{' group was encountered
        result = []     # (unused)
        for i in range(n):
            if s[i].isalpha():
                cur += s[i]
                continue
            if s[i] == '{':
                found = True
                start = i
                break
        # Append the accumulated literal prefix to every partial word.
        added = []
        for sub in prev:
            added.append(sub + cur)
        if not found:
            # No group remained: the literal prefix was the whole suffix.
            return added
        # Split the first group "{x,y,z}" into its options and branch on them.
        # find() locates the matching '}' because groups are never nested here.
        end = s.find('}')
        chars = s[start + 1: end].split(',')
        arr = []
        for sub in added:
            for ch in chars:
                arr.append(sub + ch)
        # Recurse past the closing '}' with the enlarged word set.
        return self.dfs(s[end + 1: ], arr)
def translate(data, char, replacement):
    """Replace every occurrence of *char* in *data*; print and return the result."""
    result = data.replace(char, replacement)
    print(result)
    return result


def includes(data, string):
    """Return True if *string* occurs anywhere in *data*."""
    if string in data:
        return True
    return False


def start(data, string):
    """Return True if *data* starts with a non-empty *string*.

    Bug fix: the old character-by-character loop indexed ``data[counter]``
    with no bounds check and raised IndexError whenever *string* was longer
    than *data*; that case now returns False. An empty *string* still
    returns False (preserving the original behaviour).
    """
    if not string or len(string) > len(data):
        return False
    return data.startswith(string)


def findindex(data, char):
    """Return the index of the *last* occurrence of *char* in *data*, or -1.

    Bug fix: when *char* was absent the old version hit an uninitialised
    variable (and carried a ``last_inedx`` typo), raising UnboundLocalError;
    it now returns -1, mirroring ``str.find``'s not-found convention.
    """
    last_index = -1
    for i in range(0, len(data)):
        if char == data[i]:
            last_index = i
    return last_index


def remove(data, start_index, count):
    """Remove *count* characters starting at *start_index*; print and return
    the (possibly unchanged) text.

    Bug fix: when the range ran past the end of the string the old code fell
    off the function and returned None, which then clobbered ``data`` in the
    command loop; out-of-range removals now leave the text unchanged.
    NOTE: the strict ``>`` comparison (a removal may not touch the final
    character) is kept from the original.
    """
    start_index = int(start_index)
    count = int(count)
    stop_index = start_index + count
    if len(data) > stop_index:
        data = data[0: start_index:] + data[stop_index + 0::]
    print(data)
    return data


# Command REPL: reads the subject string, then commands until "End".
# (Runs at import time — this script has no __main__ guard.)
data = input()
command = input()
while command != "End":
    command = command.split()
    if command[0] == "Lowercase":
        data = data.lower()
        print(data)
        command = input()
        continue
    elif len(command) == 2:
        act = command[0]
        a = command[1]
    elif len(command) == 3:
        act = command[0]
        a = command[1]
        b = command[2]
    if act == "Translate":
        data = translate(data, a, b)
    elif act == "Includes":
        print(includes(data, a))
    elif act == "Start":
        print(start(data, a))
    elif act == "FindIndex":
        print(findindex(data, a))
    elif act == "Remove":
        data = remove(data, a, b)
    command = input()
# Advent of Code 2020 day 9 (XMAS cipher). Reads the puzzle input at import
# time from a file named 'input' in the working directory.
L = 25  # length of the sliding "preamble" window

with open('input') as f:
    nums = list(map(int, f.read().split()))

# Part 1: find the first number that is NOT the sum of two values (at
# distinct positions, with distinct values) among the previous L numbers.
for i in range(L, len(nums)):
    pre = nums[i - L:i]
    n = nums[i]
    d = {}
    for p in pre:
        # d maps (n - q) -> q for every earlier q in the window, so finding
        # p as an existing key means q + p == n; the p != d[p] guard rejects
        # pairing a value with itself.
        if p in d and p != d[p]:
            break
        d[n - p] = p
    else:
        # No pair summed to n -> n is the part-1 answer.
        print(n)
        break

# Part 2: two-pointer scan for a contiguous run (length >= 2) summing to the
# invalid number found above. NOTE: ``n`` deliberately leaks out of the
# part-1 loop — part 2 only makes sense after part 1 broke out.
i = 0
j = 2
while j < len(nums):
    cont = nums[i:j]
    s = sum(cont)
    if s > n:
        i += 1
    elif s < n or j - i < 2:
        j += 1
    else:
        # Puzzle answer: smallest + largest value in the matching run.
        print(min(cont) + max(cont))
        break
class UnexpectedMode(ValueError):
    """Raised when a mode string is neither 'image' nor 'mesh'."""

    def __init__(self, mode: str) -> None:
        message = (
            f"Unexpected mode - found '{mode}' but must be 'image' or 'mesh'"
        )
        super().__init__(message)
def area(base, altura):
    # Area of a triangle from one side and the height relative to that side.
    return base*altura/2


# Interactive entry point (runs at import — no __main__ guard). The prompts
# and the output message are Spanish runtime strings and are kept as-is.
base = float(input('Medida de un lado'))
altura = float(input('Altura relativa a ese lado'))
print(f'El area del triángulo es de {area(base, altura)} unidades')
# -*- coding: utf-8 -*-
"""
Jaccard Index Implementation

@author: AniruddhaMaheshDave
"""


def jaccard_index(str1, str2, n_gram=2):
    """Compute the Jaccard index of *str1* and *str2* over character n-grams.

    The Jaccard index is |A ∩ B| / |A ∪ B|, where A and B are the sets of
    space-free n-grams of the two strings.

    :param str1: first string
    :param str2: second string
    :param n_gram: n-gram width (default 2, i.e. bigrams)
    :returns: 1 for identical strings, 0 if either string is empty,
        otherwise a similarity in [0, 1]
    :raises ValueError: if neither string yields any n-grams
    """
    if str1 == str2:
        return 1
    len1, len2 = len(str1), len(str2)
    if (len1 == 0) or (len2 == 0):
        return 0

    def _ngrams(s):
        # Slide a window of width n_gram over *s*, skipping windows that
        # contain a space. Bug fix: the old bound ``range(len - 1)`` was
        # hard-coded for bigrams; ``len - n_gram + 1`` covers every full
        # window for any n_gram (including the last one when n_gram == 1).
        return {s[i:i + n_gram]
                for i in range(len(s) - n_gram + 1)
                if ' ' not in s[i:i + n_gram]}

    first_set = _ngrams(str1)
    second_set = _ngrams(str2)

    if first_set and second_set:
        intersection_cardinality = len(first_set & second_set)
        union_cardinality = len(first_set | second_set)
        return intersection_cardinality / float(union_cardinality)
    # Raise a specific exception type; ValueError subclasses Exception, so
    # existing ``except Exception`` callers keep working.
    raise ValueError("No n-grams found. Choose a lower value of n_gram")
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS.  All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# GYP build description for the iLBC speech codec (built as a library whose
# kind is selected by the <(library) variable from common_settings.gypi).
{
  'includes': [
    '../../../../../../common_settings.gypi', # Common settings
  ],
  'targets': [
    {
      'target_name': 'iLBC',
      'type': '<(library)',
      # Links against the shared signal-processing library (SPL).
      'dependencies': [
        '../../../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
      ],
      'include_dirs': [
        '../interface',
      ],
      # Targets depending on iLBC also see the public interface headers.
      'direct_dependent_settings': {
        'include_dirs': [
          '../interface',
        ],
      },
      'sources': [
        # Public API header
        '../interface/ilbc.h',
        # C implementation files
        'abs_quant.c', 'abs_quant_loop.c', 'augmented_cb_corr.c', 'bw_expand.c',
        'cb_construct.c', 'cb_mem_energy.c', 'cb_mem_energy_augmentation.c',
        'cb_mem_energy_calc.c', 'cb_search.c', 'cb_search_core.c',
        'cb_update_best_index.c', 'chebyshev.c', 'comp_corr.c', 'constants.c',
        'create_augmented_vec.c', 'decode.c', 'decode_residual.c',
        'decoder_interpolate_lsf.c', 'do_plc.c', 'encode.c', 'energy_inverse.c',
        'enh_upsample.c', 'enhancer.c', 'enhancer_interface.c',
        'filtered_cb_vecs.c', 'frame_classify.c', 'gain_dequant.c',
        'gain_quant.c', 'get_cd_vec.c', 'get_lsp_poly.c', 'get_sync_seq.c',
        'hp_input.c', 'hp_output.c', 'ilbc.c', 'index_conv_dec.c',
        'index_conv_enc.c', 'init_decode.c', 'init_encode.c', 'interpolate.c',
        'interpolate_samples.c', 'lpc_encode.c', 'lsf_check.c',
        'lsf_interpolate_to_poly_dec.c', 'lsf_interpolate_to_poly_enc.c',
        'lsf_to_lsp.c', 'lsf_to_poly.c', 'lsp_to_lsf.c', 'my_corr.c',
        'nearest_neighbor.c', 'pack_bits.c', 'poly_to_lsf.c', 'poly_to_lsp.c',
        'refiner.c', 'simple_interpolate_lsf.c', 'simple_lpc_analysis.c',
        'simple_lsf_dequant.c', 'simple_lsf_quant.c', 'smooth.c',
        'smooth_out_data.c', 'sort_sq.c', 'split_vq.c', 'state_construct.c',
        'state_search.c', 'swap_bytes.c', 'unpack_bits.c', 'vq3.c', 'vq4.c',
        'window32_w32.c', 'xcorr_coef.c',
        # Private headers (listed so IDEs/builds track them)
        'abs_quant.h', 'abs_quant_loop.h', 'augmented_cb_corr.h',
        'bw_expand.h', 'cb_construct.h', 'cb_mem_energy.h',
        'cb_mem_energy_augmentation.h', 'cb_mem_energy_calc.h', 'cb_search.h',
        'cb_search_core.h', 'cb_update_best_index.h', 'chebyshev.h',
        'comp_corr.h', 'constants.h', 'create_augmented_vec.h', 'decode.h',
        'decode_residual.h', 'decoder_interpolate_lsf.h', 'do_plc.h',
        'encode.h', 'energy_inverse.h', 'enh_upsample.h', 'enhancer.h',
        'enhancer_interface.h', 'filtered_cb_vecs.h', 'frame_classify.h',
        'gain_dequant.h', 'gain_quant.h', 'get_cd_vec.h', 'get_lsp_poly.h',
        'get_sync_seq.h', 'hp_input.h', 'hp_output.h', 'defines.h',
        'index_conv_dec.h', 'index_conv_enc.h', 'init_decode.h',
        'init_encode.h', 'interpolate.h', 'interpolate_samples.h',
        'lpc_encode.h', 'lsf_check.h', 'lsf_interpolate_to_poly_dec.h',
        'lsf_interpolate_to_poly_enc.h', 'lsf_to_lsp.h', 'lsf_to_poly.h',
        'lsp_to_lsf.h', 'my_corr.h', 'nearest_neighbor.h', 'pack_bits.h',
        'poly_to_lsf.h', 'poly_to_lsp.h', 'refiner.h',
        'simple_interpolate_lsf.h', 'simple_lpc_analysis.h',
        'simple_lsf_dequant.h', 'simple_lsf_quant.h', 'smooth.h',
        'smooth_out_data.h', 'sort_sq.h', 'split_vq.h', 'state_construct.h',
        'state_search.h', 'swap_bytes.h', 'unpack_bits.h', 'vq3.h', 'vq4.h',
        'window32_w32.h', 'xcorr_coef.h',
      ],
    },
  ],
}

# Local Variables:
#   tab-width:2
#   indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include".split(';') if "/home/lzh/racecar_ws/devel/include;/home/lzh/racecar_ws/src/navigation-melodic-devel/dwa_local_planner/include" != "" else [] PROJECT_CATKIN_DEPENDS = "base_local_planner;dynamic_reconfigure;nav_msgs;pluginlib;sensor_msgs;roscpp;tf2;tf2_ros".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldwa_local_planner".split(';') if "-ldwa_local_planner" != "" else [] PROJECT_NAME = "dwa_local_planner" PROJECT_SPACE_DIR = "/home/lzh/racecar_ws/devel" PROJECT_VERSION = "1.16.7"
# Read a name, normalise it (trim + lowercase), and report whether it
# contains the surname 'silva'. Runs at import time (no __main__ guard);
# prompt and output are Portuguese runtime strings and are kept as-is.
nome = input("Digite seu nome ").strip().lower()
confirmacao = 'silva' in nome
print(f"Seu nome tem silva {confirmacao}")
# Exercício 2.3 nome = input("olá! Informe seu nome:") print(nome) # Exercício 2.4 a = 3 b = 5 print(2*a + 3*b) # Exercício 2.5 a = 3 b = 7 c = 4 print(a+b+c) # Exercício 2.6 salário = 750 aumento = 0.15 sal_real = (salário+(salário*aumento)) print(sal_real)
# ***************************** # Environment specific settings # ***************************** # DO NOT use "DEBUG = True" in production environments DEBUG = True # DO NOT use Unsecure Secrets in production environments # Generate a safe one with: # python -c "import os; print repr(os.urandom(24));" SECRET_KEY = ( 'This is an UNSECURE Secret. CHANGE THIS for production environments.' ) # SQLAlchemy settings SQLALCHEMY_DATABASE_URI = 'sqlite:///../app.sqlite' SQLALCHEMY_TRACK_MODIFICATIONS = False # Avoids a SQLAlchemy Warning
""" Given a column title as appear in an Excel sheet, return its corresponding column number. For example: A -> 1 B -> 2 C -> 3 ... Z -> 26 AA -> 27 AB -> 28 ... Example 1: Input: "A" Output: 1 Example 2: Input: "AB" Output: 28 Example 3: Input: "ZY" Output: 701 """ class Solution: def titleToNumber(self, s: str) -> int: ret = 0 for c in s: n = ord(c) - ord('A') + 1 ret = ret * 26 + n return ret
# coding: utf-8

# # Functions (1) - Creating Functions

# In this lesson we're going to learn about functions in Python. Functions are an
# important tool when programming and their use can be very complex. It's not the
# aim of this course to teach you how to implement functional programming;
# instead, this lesson will give you a grounding in how functions work and an
# insight into how we can use them to help us create charts with Plotly.
#
# ## What is a function?
#
# A function is a block of code which is used to perform a single action. A
# function should be reusable, and it should behave predictably. We have already
# used several built-in functions, such as <code>print()</code> and
# <code>len()</code>, but Python also allows you to create user-defined functions.
#
# ## How to create a function
#
# The syntax for creating a function is relatively straightforward. We first need
# to tell Python that we're going to define a function using the <code>def</code>
# keyword; we must then give the function a name followed by some parentheses
# (<code> () </code>) and a colon. Function names have the same restrictions as
# variable names (can't start with a number, can only contain letters, numbers
# and underscores). After the function name has been defined, any code within the
# function is indented by four spaces (or a tab):
# ````python
# def <function name>():
#     <code to run>
# ````
#
# In the cell below, I'm defining a function which prints the string
# <code>"This is a function"</code> every time it is called:

# In[10]:

def testFunction():
    print("This is a function")


# When we have defined a function, we can call the function as we would call any
# built-in function that we have already used, remembering to include the
# parentheses:

# In[11]:

testFunction()


# ## Using arguments in a function
#
# When we use the <code>len()</code> function, we have to tell that function
# which object we want the length of. We are passing that object as an argument
# to the function:

# In[12]:

len("abcdefg")


# We can do the same with user-defined functions. To do so, we create the
# function as normal, but inside the parentheses we can put argument names. We
# can put as many as we like, but each must be separated by a comma:
# ````python
# def <function name>(<arg1>, <arg2>, . . . <argN>):
#     <code to run>
# ````
#
# We can then reference these arguments inside the function. In the cell below,
# I've written a function which prints out two items. Notice that I've converted
# each item to a string using the <code>str()</code> function - this ensures
# that the function behaves predictably - without converting an integer to a
# string, the code wouldn't run.

# In[13]:

def testFunction2(item1, item2):
    print("The first item is: " + str(item1) + ", the second item is: " + str(item2))


# We can then use this function and pass arguments to it:

# In[14]:

testFunction2('abc', 20)


# The function will create a different output if we pass different arguments to
# it. This is because the arguments which are passed to a function only endure
# for the duration of that function.

# In[15]:

testFunction2('howdy', 'partner')


# ## Returning objects from a function
#
# Functions are useful when we use them to create or modify an object. Variables
# which are created inside a function are not available to the rest of the code,
# unless we return them (or specifically declare them to be
# <a href="http://stackoverflow.com/questions/423379/using-global-variables-in-a-function-other-than-the-one-that-created-them">global variables</a>)
#
# We can return an object created inside a function by using the return keyword;
# we must assign the output of a function to an object and we cannot write any
# more code after the return statement.
#
# In the cell below, I create a function which returns a list of alternating
# values. This function takes three arguments, two of which are the values to
# alternate, whilst the third is the number of times they must be repeated:

# In[16]:

def alternateList(item1, item2, repeats):
    alternate = [item1, item2]
    altRepeat = alternate * repeats
    return altRepeat


# Because the function returns a value we must assign the returned output to a
# variable:

# In[17]:

repeated1 = alternateList(5, 50, 3)


# There are two variables created inside this function; <code>alternate</code>
# and <code>altRepeat</code>. These variables exist only within the function and
# we cannot access them in open code.
# NOTE: the next two cells raise NameError *on purpose* — that is the teaching
# point; running this file top-to-bottom will therefore stop here.

# In[18]:

print(alternate)


# In[19]:

print(altRepeat)


# But because we returned the value of the variable <code>altRepeat</code>,
# creating a new variable with that value, we can now see what the function
# <code>alternateList()</code> has created:

# In[20]:

repeated1


# We can return two or more variables from a function by separating each
# variable with a comma. We must assign each to an object:

# In[21]:

def alternateList(item1, item2, repeats):
    alternate = [item1, item2]
    altRepeat = alternate * repeats
    return alternate, altRepeat

pair, rpt = alternateList(77, 99, 5)
print(pair)


# In[22]:

print(rpt)


# ### What have we learnt this lesson?

# In this lesson we've learnt how to define a function using the
# <code>def</code> keyword, and how to pass arguments to the function. We've
# seen that these arguments only hold their value within the function, and that
# we can use a return statement to return one or more values from within the
# function.
#
# In the next lesson we'll look at how we can use functions to help us make our
# charts.

# If you have any questions, please ask in the comments section or email
# <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
# This module is from mx/DateTime/LazyModule.py and is
# distributed under the terms of the eGenix.com Public License Agreement
# https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf

""" Helper to enable simple lazy module import.

    'Lazy' means the actual import is deferred until an attribute
    is requested from the module's namespace. This has the advantage
    of allowing all imports to be done at the top of a script (in a
    prominent and visible place) without having a great impact
    on startup time.

    Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:mal@lemburg.com
    See the documentation for further information on copyrights,
    or contact the author. All Rights Reserved.
"""

### Constants

# Set to 1 to trace lazy-loading activity via print statements.
_debug = 0

###


class LazyModule:

    """Lazy module class.

    Lazy modules are imported into the given namespaces whenever a
    non-special attribute (there are some attributes like __doc__
    that class instances handle without calling __getattr__) is
    requested. The module is then registered under the given name
    in locals usually replacing the import wrapper instance. The
    import itself is done using globals as global namespace.

    Example of creating a lazy load module:

    ISO = LazyModule('ISO',locals(),globals())

    Later, requesting an attribute from ISO will load the module
    automatically into the locals() namespace, overriding the
    LazyModule instance:

    t = ISO.Week(1998,1,1)

    NOTE(review): the ``__lazymodule_*`` attributes below are name-mangled
    to ``_LazyModule__lazymodule_*``. ``__lazymodule_import`` writes the
    *unmangled* key ``"__lazymodule_loaded"`` into ``self.__dict__``, which
    is not the mangled attribute the flag checks read — so those checks may
    keep seeing the class-level 0. In practice the wrapper is replaced in
    ``locals`` and ``__dict__`` is filled with the module's symbols, so
    repeat loads are cheap (sys.modules cache), but confirm before changing
    this logic.
    """

    # Flag which indicates whether the LazyModule is initialized or not
    __lazymodule_init = 0

    # Name of the module to load
    __lazymodule_name = ""

    # Flag which indicates whether the module was loaded or not
    __lazymodule_loaded = 0

    # Locals dictionary where to register the module
    __lazymodule_locals = None

    # Globals dictionary to use for the module import
    __lazymodule_globals = None

    def __init__(self, name, locals, globals=None):
        """Create a LazyModule instance wrapping module name.

        The module will later on be registered in locals under the
        given module name.

        globals is optional and defaults to locals.
        """
        self.__lazymodule_locals = locals
        if globals is None:
            globals = locals
        self.__lazymodule_globals = globals
        mainname = globals.get("__name__", "")
        if mainname:
            # Qualify the wrapper's __name__ relative to the importing module.
            self.__name__ = mainname + "." + name
            self.__lazymodule_name = name
        else:
            self.__name__ = self.__lazymodule_name = name
        # From here on, attribute writes go through the lazy-loading path
        # in __setattr__ (see the __lazymodule_init check there).
        self.__lazymodule_init = 1

    def __lazymodule_import(self):
        """Import the module now."""
        # Load and register module
        name = self.__lazymodule_name
        if self.__lazymodule_loaded:
            return self.__lazymodule_locals[name]
        if _debug:
            print("LazyModule: Loading module %r" % name)
        self.__lazymodule_locals[name] = module = __import__(
            name, self.__lazymodule_locals, self.__lazymodule_globals, "*"
        )
        # Fill namespace with all symbols from original module to
        # provide faster access.
        self.__dict__.update(module.__dict__)
        # Set import flag (see the name-mangling NOTE in the class docstring).
        self.__dict__["__lazymodule_loaded"] = 1
        if _debug:
            print("LazyModule: Module %r loaded" % name)
        return module

    def __getattr__(self, name):
        """Import the module on demand and get the attribute."""
        if self.__lazymodule_loaded:
            # Module dict was already merged in, so a miss is a real miss.
            raise AttributeError(name)
        if _debug:
            print(
                "LazyModule: "
                "Module load triggered by attribute %r read access" % name
            )
        module = self.__lazymodule_import()
        return getattr(module, name)

    def __setattr__(self, name, value):
        """Import the module on demand and set the attribute."""
        if not self.__lazymodule_init:
            # Still inside __init__: write plainly, no lazy machinery yet.
            self.__dict__[name] = value
            return
        if self.__lazymodule_loaded:
            # Mirror the assignment onto the registered module slot too.
            self.__lazymodule_locals[self.__lazymodule_name] = value
            self.__dict__[name] = value
            return
        if _debug:
            print(
                "LazyModule: "
                "Module load triggered by attribute %r write access" % name
            )
        module = self.__lazymodule_import()
        setattr(module, name, value)

    def __repr__(self):
        return "<LazyModule '%s'>" % self.__name__
# Demonstrate LIFO behaviour of a Python list used as a stack:
# append() pushes onto the right end, pop() removes from the same end.
stack = []
stack.append('a')
stack.append('b')
stack.append('c')

print('Initial stack')
print(stack)

# pop() with no argument removes and returns the LAST element, so the
# elements come back in reverse push order: c, b, a.
# (Fixed misspelling "poped" -> "popped" in the output messages.)
print('\nElements popped from stack:')
print(stack.pop())
print(stack.pop())
print(stack.pop())

print('\nStack after elements are popped:')
print(stack)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Hiswelókë's Sindarin dictionary
# Compiled, edited and annotated by Didier Willis
# https://www.jrrvf.com/hisweloke/sindar/online/sindar/dict-en-sd.html

# English-to-Sindarin word list: keys are English words, values are their
# Sindarin translations.  NOTE(review): non-ASCII values rely on the UTF-8
# source encoding declared above.
dict_words = {
'abandon':'awartha', 'abandonment':'awarth', 'abhor':'fuia', 'abhorrence':'delos', 'abhorrent':'thaur', 'abiding':'him', 'abominable':'deleb', 'abound':'ovra', 'about':'o', 'above':'or', 'abroad':'palan', 'abundant':'ovor', 'abyss':'dath', 'account':'pennas', 'acre':'rîdh', 'across':'ath', 'acute':'laeg', 'afar':'palan', 'affect':'presta', 'affected':'prestannen', 'affection':'mîl', 'affliction':'caul', 'after':'ab', 'again':'ad', 'against':'dan', 'age':'andrann', 'aged':'iphant', 'agile':'celeg', 'ago':'io', 'ah':'ai', 'air':'gwelu', 'alas':'nae', 'alive':'cuin', 'all':'pân', 'allegiance':'buia', 'alone':'er', 'alphabet':'angerthas', 'amputate':'osgar', 'anchorage':'lorn', 'ancient':'iaur', 'and':'a', 'anger':'rûth', 'angle':'bennas', 'animal':'lavan', 'annal':'ínias', 'anniversary':'edinor', 'answer':'dambeth', 'anybody':'pen', 'apparition':'auth', 'appear':'thia', 'apple':'cordof', 'approach':'anglenna', 'appropriate':'seidia', 'april':'gwirith', 'arch':'cû', 'area':'sad', 'arid':'apharch', 'arm':'ranc', 'around':'os', 'article':'bach', 'as':'sui', 'ash':'lith', 'ashen':'lithui', 'ashy':'lithui', 'assembly':'hûd', 'associate':'gwathel', 'at':'na', 'athwart':'thar', 'august':'urui', 'autumn':'iavas', 'awakening':'echui', 'awe':'anwar', 'awful':'taur', 'axe':'hast', 'babble':'glavra', 'babbling':'glavrol', 'babe':'laes', 'baby':'gwinig', 'back':'ad', 'bad':'faeg', 'bald':'rûdh', 'ball':'coron', 'bane':'dagnir', 'bank':'duirro', 'bark':'rîf', 'barrow':'gorthad', 'batter':'blab', 'battle':'auth', 'bay':'côf', 'be':'na', 'beach':'falas', 'beacon':'narthan', 'bear':'brôg', 'beard':'fang', 'bearer':'cyll', 'beat':'blab', 'beautiful':'bain', 'bed':'haust', 'bedridden':'caeleb', 'beech':'brethil', 'beget':'edonna', 'begin':'heria', 'behind':'adel',
'behold':'alae', 'bell':'nell', 'bellowing':'glam', 'beneath':'di', 'bent':'cûn', 'beside':'ar', 'betray':'gweria', 'betrayer':'gwarth', 'between':'mîn', 'beyond':'athan', 'bind':'gwedh', 'biped':'tad-dal', 'birch':'brethil', 'bird':'aew', 'bite':'nag', 'biting':'naeth', 'bitter':'saer', 'black':'morn', 'blackthorn':'toss', 'blade':'hathol', 'blessedness':'galu', 'blessings':'galu', 'bliss':'manadh', 'blocked':'tafnen', 'blood':'agar', 'bloodstained':'agarwaen', 'blossom':'edlothia', 'blossoming':'edlothiad', 'blow':'dram', 'blue':'elu', 'board':'pân', 'boat':'lunt', 'body':'rhaw', 'bold':'beren', 'bolster':'nedhu', 'bond':'gwaedh', 'book':'parf', 'border':'edrain', 'born':'onnen', 'boss':'dolt', 'bound':'gleina', 'boundary':'gland', 'bow':'cû', 'bowed':'cûn', 'bowstring':'tang', 'branch':'golf', 'brand':'ylf', 'bread':'basgorn', 'break':'breitha', 'breath':'hwest', 'breathe':'thuia', 'breeze':'hwest', 'bride':'dineth', 'bridegroom':'daer', 'bridge':'iant', 'bright':'glân', 'brilliance':'aglar', 'brilliant':'celair', 'bring':'tog', 'broad':'land', 'broadsword':'hathol', 'brooch':'tachol', 'broth':'salph', 'brother':'gwador', 'brown':'baran', 'bud':'tuiw', 'builder':'thavron', 'building':'adab', 'bull':'mund', 'burden':'caul', 'bush':'toss', 'butterfly':'gwilwileth', 'by':'an', 'cairn':'sarnas', 'cake':'cram', 'calendar':'genediad', 'call':'can', 'calligrapher':'tegilbor', 'camp':'echad', 'canopy':'daedelu', 'cape':'bund', 'carpenter':'thavron', 'catch':'gad', 'cave':'fela', 'cavern':'gath', 'cavernous':'raudh', 'centre':'ened', 'cessation':'post', 'chain':'angwedh', 'chair':'ham', 'chamber':'sam', 'chant':'linnathon', 'charm':'lûth', 'chaser':'rŷn', 'cheat':'gweria', 'child':'hên', 'circle':'echor', 'circular':'rend', 'citadel':'ost', 'city':'caras', 'clamor':'caun', 'clan':'noss', 'clasp':'taew', 'claw':'gamp', 'clean':'puig', 'clear':'lim', 'cleared':'laden', 'clearing':'lant', 'cleaver':'crist', 'cleft':'cîl', 'clever':'maen', 'cloak':'coll', 'closed':'hollen',
'clothe':'hab', 'clothing':'hammad', 'cloud':'fain', 'cloudy':'fanui', 'club':'grond', 'coat':'heleth', 'cobweb':'lhing', 'coin':'canath', 'cold':'helch', 'come':'tol', 'commanding':'conui', 'compact':'gowest', 'compulsion':'thang', 'conceal':'delia', 'concealed':'dolen', 'conceive':'nautha', 'concerning':'o', 'confirm':'tangada', 'conquer':'orthor', 'contorted':'norn', 'contract':'gowest', 'contrivance':'gaud', 'control':'tortha', 'converse':'athrabeth', 'cool':'him', 'copper':'gaer', 'cord':'nordh', 'core':'ened', 'corn':'iau', 'corner':'bennas', 'corpse':'daen', 'corrupt':'thaw', 'counsel':'gûr', 'count':'gonod', 'countenance':'thîr', 'countless':'arnediad', 'course':'rant', 'courtyard':'pand', 'cover':'esgal', 'crabbed':'norn', 'craft':'curu', 'creature':'raug', 'crescent':'cû', 'crest':'amloth', 'crook':'gamp', 'crooked':'raeg', 'cross':'athrada', 'crossing':'athrad', 'crow':'corch', 'crowd':'hoth', 'crown':'rî', 'crowned':'rîn', 'cruel':'balch', 'cry':'can', 'cunning':'coru', 'curse':'rhach', 'cushion':'nedhu', 'custody':'band', 'custom':'haew', 'cut':'osgar', 'cutlass':'lang', 'cycle':'andrann', 'daddy':'ada', 'dagger':'sigil', 'daily':'ilaurui', 'daisy':'eirien', 'damp':'nîd', 'dangle':'gling', 'dare':'bertha', 'dark':'doll', 'darkness':'dúath', 'dart':'hador', 'daughter':'iell', 'dauntless':'thalion', 'dawn':'minuial', 'day':'arad', 'daylight':'calan', 'daytime':'arad', 'dead':'fern', 'deadly':'delu', 'deal':'maetha', 'dear':'mell', 'death':'gûr', 'debate':'athrabeth', 'december':'girithron', 'declivity':'pend', 'decripit':'gern', 'deed':'carth', 'deep':'nûr', 'deer':'aras', 'defile':'aglonn', 'dell':'im', 'delving':'groth', 'demon':'raug', 'denial':'ubed', 'depart':'gwanna', 'departed':'gwann', 'deprive':'neitha', 'deprived':'neithan', 'descendant':'ion', 'desert':'eru', 'desire':'aníra', 'detestation':'delos', 'device':'gaud', 'dew':'mîdh', 'diacritic':'gasdil', 'die':'gwanna', 'dim':'gwathra', 'dimness':'dû', 'din':'glam', 'dirty':'gwaur',
'disgust':'del', 'distance':'haered', 'distant':'hae', 'distinct':'minai', 'district':'trann', 'disturb':'presta', 'divinity':'balan', 'do':'car', 'doer':'ceredir', 'dog':'hû', 'doing':'cared', 'dome':'telu', 'doom':'amarth', 'doomed':'barad', 'door':'annon', 'doorway':'fennas', 'dot':'peg', 'double':'edaid', 'dough':'moeas', 'dove':'cugu', 'down':'dad', 'downhill':'dadbenn', 'downs':'penneth', 'downwards':'dad', 'dragon':'amlug', 'drain':'sautha', 'draught':'suith', 'dread':'achas', 'dreadful':'gaer', 'dream':'ôl', 'drear':'muil', 'drink':'sog', 'drunk':'sogannen', 'dry':'apharch', 'dungeon':'gador', 'Dunlendings':'gwathuirim', 'duress':'band', 'dusk':'tinnu', 'dusky':'doll', 'dust':'ast', 'dwarf':'anfang', 'dwell':'dortha', 'dwelling':'bar', 'eager':'bara', 'eagle':'thôr', 'ear':'lhaw', 'earth':'amar', 'earthen':'cefn', 'east':'amrûn', 'eastern':'rhúnen', 'eat':'mad', 'eavesdrop':'lathra', 'eavesdropper':'lathron', 'ebb':'dannen', 'echo':'glamor', 'echoing':'glamren', 'eddy':'hwinia', 'edge':'lanc', 'eight':'toloth', 'eighth':'tollui', 'elder':'einior', 'elephant':'annabon', 'eleven':'mimp', 'elf':'avar', 'elm':'lalf', 'elvish':'edhellen', 'embers':'iûl', 'eminent':'orchal', 'employ':'iuitha', 'empty':'cofn', 'encampment':'estolad', 'enchant':'lútha', 'encircling':'echor', 'enclose':'gleina', 'enclosure':'cerin', 'encouragement':'hûl', 'end':'meth', 'endless':'arnediad', 'endurance':'bronwe', 'endure':'brenia', 'enduring':'bronadui', 'enemy':'coth', 'enfold':'gwaeda', 'enlaced':'raen', 'enlarge':'panna', 'enmesh':'gonathra', 'enmity':'coth', 'enough':'far', 'ent':'onod', 'entangle':'gonathra', 'entanglement':'gonathras', 'enter':'minna', 'envelope':'ui', 'errant':'raun', 'error':'mist', 'especial':'edregol', 'establish':'tangada', 'estuary':'ethir', 'eternal':'uireb', 'eternity':'uir', 'evening':'aduial', 'ever':'ui', 'evil':'ogol', 'exalted':'arth', 'excavate':'rosta', 'excavation':'groth', 'exclamation':'elo', 'excluded':'said', 'exile':'edledhia',
'exiled':'edlenn', 'expression':'thîr', 'eye':'hen', 'face':'nîf', 'fade':'thinna', 'fading':'peleth', 'faintness':'hwîn', 'fair':'bain', 'faith':'bronwe', 'faithful':'sador', 'fall':'danna', 'fallen':'dannen', 'falling':'talt', 'fallow':'maidh', 'family':'noss', 'fane':'iaun', 'fang':'carch', 'fantastic':'hwiniol', 'far':'hae', 'fashion':'echad', 'fast':'avorn', 'fasten':'taetha', 'fat':'tûg', 'fate':'amarth', 'fated':'amarthan', 'father':'ada', 'fathom':'raew', 'fawn':'maidh', 'fear':'achas', 'feast':'mereth', 'feat':'carth', 'february':'nínui', 'feel':'plada', 'fell':'delu', 'female':'inu', 'fence':'cail', 'fenced':'thoren', 'fenland':'lô', 'festival':'mereth', 'festive':'meren', 'fetch':'toltha', 'field':'parth', 'fierce':'braig', 'fierceness':'bregolas', 'fiery':'bara', 'fifth':'lefnui', 'fight':'maeth', 'filament':'lhê', 'fill':'panna', 'final':'methen', 'fine':'brand', 'finger':'emig', 'fire':'naur', 'firm':'tanc', 'firmament':'menel', 'first':'erui', 'fish':'hâl', 'fist':'dond', 'fit':'maer', 'five':'leben', 'fix':'penia', 'flame':'lach', 'flap':'blab', 'flat':'talu', 'flee':'drega', 'flesh':'rhaw', 'float':'loda', 'flood':'duinen', 'floor':'pân', 'flow':'rib', 'flower':'alfirin', 'flowering':'edlothiad', 'flowing':'cell', 'fly':'revia', 'foam':'falf', 'foe':'gûd', 'fog':'hîth', 'foggy':'hethu', 'follow':'aphad', 'follower':'aphadon', 'food':'aes', 'foot':'pôd', 'footprint':'rein', 'footstool':'tharas', 'for':'an', 'ford':'athrad', 'forest':'taur', 'forester':'tauron', 'forgive':'díhena', 'forked':'thanc', 'formation':'dírnaith', 'formed':'cadu', 'former':'iaur', 'forsake':'awartha', 'forsaken':'eglan', 'fort':'garth', 'forth':'ed', 'fortress':'barad', 'fortune':'galu', 'four':'canad', 'fourth':'canthui', 'fox':'rusc', 'frail':'mîw', 'free':'lain', 'freed':'lain', 'freeing':'leithian', 'frequent':'laew', 'fresh':'cîw', 'friend':'elvellon', 'friendly':'milui', 'friendship':'gwend', 'frog':'cabor', 'from':'o', 'front':'nîf', 'full':'pant',
'fullness':'pathred', 'fungus':'hwand', 'fur':'heleth', 'gap':'dîn', 'garden':'sant', 'garland':'rî', 'garment':'hamp', 'gate':'annon', 'gateway':'fennas', 'gay':'gelir', 'gaze':'tir', 'gazed':'tíriel', 'gazing':'tiriel', 'generous':'fael', 'germ':'eredh', 'giddiness':'hwîn', 'giddy':'hwiniol', 'gift':'ant', 'girdle':'lest', 'girl':'iell', 'give':'anna', 'giver':'oneth', 'gladden':'ninglor', 'glance':'glintha', 'glass':'cenedril', 'gleam':'glîn', 'glimmering':'gael', 'glint':'glîn', 'glisten':'thilia', 'glittering':'galad', 'globe':'coron', 'globed':'corn', 'gloom':'daw', 'gloomy':'dem', 'glorify':'egleria', 'glorious':'aglareb', 'glory':'aglar', 'goblet':'sûl', 'goblin':'orch', 'gold':'côl', 'golden':'mallen', 'good':'maer', 'goose':'gwaun', 'gore':'naith', 'gorge':'cabed', 'grass':'thâr', 'grassland':'nan', 'grave':'haudh', 'gravel':'brith', 'great':'beleg', 'greedy':'melch', 'green':'calen', 'greet':'suila', 'greeting':'suil', 'grey':'mith', 'gross':'host', 'ground':'talaf', 'grow':'gala', 'growth':'galas', 'guard':'tirith', 'guarded':'tirnen', 'guess':'inc', 'gulf':'iâ', 'gull':'gwael', 'habit':'haew', 'hack':'hasta', 'hair':'fast', 'half':'per', 'halfling':'perian', 'hall':'tham', 'halt':'daro', 'hammer':'dam', 'hand':'cam', 'handed':'crumui', 'handle':'maetha', 'handy':'maed', 'hang':'gling', 'happy':'gelir', 'harass':'trasta', 'harbour':'hûb', 'harbourage':'hobas', 'hard':'norn', 'harp':'ganna', 'harper':'talagan', 'hasp':'taew', 'hassock':'tharas', 'hasty':'celeg', 'hat':'carab', 'hateful':'delu', 'have':'gar', 'haven':'círbann', 'hawthorn':'toss', 'he':'e', 'head':'dôl', 'headland':'cast', 'heal':'nesta', 'healing':'nestad', 'heap':'cum', 'hearer':'lathron', 'heart':'gûr', 'heat':'brass', 'heaven':'menel', 'heavy':'long', 'hedge':'cai', 'height':'taen', 'heir':'hîl', 'helm':'thôl', 'helmet':'harn', 'hem':'glân', 'herb':'athelas', 'here':'sí', 'hero':'callon', 'hew':'draf', 'hewn':'drafn', 'hidden':'dolen', 'hideous':'uanui', 'high':'ara', 'hill':'amon',
'hindmost':'tele', 'his':'în', 'historical':'gobennathren', 'history':'gobennas', 'hobbit':'perian', 'hold':'gar', 'holder':'taew', 'hole':'dath', 'hollow':'coll', 'holly':'ereg', 'holy':'aer', 'home':'bar', 'honey':'glî', 'honeycomb':'nîdh', 'hook':'gamp', 'hop':'laba', 'hope':'amdir', 'horde':'hoth', 'horn':'rafn', 'horrible':'deleb', 'horror':'del', 'horse':'lobor', 'host':'gwaith', 'hot':'born', 'hound':'rŷn', 'house':'adab', 'household':'herth', 'howl':'gaul', 'howling':'gawad', 'huge':'taur', 'human':'echil', 'hummock':'gwastar', 'hump':'tump', 'hunt':'fara', 'hunter':'faradrim', 'hunting':'farad', 'hurl':'had', 'husband':'hervenn', 'I':'im', 'ice':'heleg', 'icicle':'aeglos', 'idea':'inc', 'ill':'lhaew', 'immortal':'alfirin', 'impelled':'horn', 'impetuous':'alag', 'impetus':'gorf', 'in':'mîn', 'incline':'talad', 'inclined':'dadbenn', 'index':'emig', 'inflorescence':'goloth', 'innumerable':'arnediad', 'insecure':'talt', 'insert':'nestag', 'inside':'nedh', 'instead':'sennui', 'insult':'eitha', 'intelligence':'hannas', 'intelligent':'hand', 'intend':'thel', 'iron':'ang', 'island':'tol', 'isle':'tol', 'isolated':'ereb', 'it':'ha', 'january':'narwain', 'jaw':'anc', 'jerk':'rinc', 'jewel':'mîr', 'journey':'lend', 'joy':'gell', 'joyous':'meren', 'jubilation':'gellam', 'judge':'badhor', 'judgement':'baudh', 'juice':'paich', 'juicy':'pihen', 'july':'cerveth', 'june':'nórui', 'just':'fael', 'keen':'laeg', 'keep':'heb', 'kind':'milui', 'kindle':'nartha', 'kindler':'toniel', 'kindred':'noss', 'king':'âr', 'kingdom':'aranarth', 'kingfisher':'heledir', 'kingsfoil':'athelas', 'kinsman':'gwanur', 'kinswoman':'gwanur', 'knife':'sigil', 'knob':'dolt', 'knock':'tamma', 'knot':'nardh', 'knotted':'norn', 'knowledge':'gûl', 'labour':'muda', 'lacking':'pen', 'lady':'brennil', 'lair':'caew', 'lake':'ael', 'lament':'naergon', 'lamentable':'naer', 'lamentation':'conath', 'lamp':'calar', 'lampwright':'calardan', 'land':'dôr', 'language':'annúnaid', 'last':'brona', 'lasting':'bronadui',
'later':'ab', 'laugh':'gladh', 'laughter':'lalaith', 'lay':'glaer', 'lead':'tog', 'leaf':'galenas', 'league':'daur', 'lean':'lhain', 'leap':'cab', 'learned':'golwen', 'leather':'lath', 'left':'crom', 'letter':'têw', 'level':'land', 'lick':'lav', 'life':'cuil', 'lift':'hal', 'light':'calad', 'like':'sui', 'limit':'gleina', 'line':'taeg', 'link':'lif', 'lion':'raw', 'listen':'lasta', 'listener':'lathron', 'little':'pîn', 'live':'cuinar', 'load':'cûl', 'loaf':'basgorn', 'loathing':'del', 'loathsome':'deleb', 'lock':'fing', 'lode':'rant', 'lofty':'arth', 'log':'drafn', 'lonely':'ereb', 'long':'anann', 'look':'thîr', 'loose':'adleg', 'lord':'brannon', 'lore':'angol', 'loud':'brui', 'love':'mel', 'lovely':'melui', 'lover':'melethril', 'loving':'milui', 'low':'tofn', 'lowlying':'tofn', 'lust':'mael', 'lustful':'maelui', 'machine':'gaud', 'magic':'angol', 'magician':'gollor', 'maid':'elleth', 'make':'caro', 'maker':'ceredir', 'making':'cared', 'male':'anu', 'man':'abonnen', 'manage':'maetha', 'manhood':'gwaith', 'mantle':'coll', 'many':'laew', 'maple':'toss', 'march':'gwaeron', 'mark':'andaith', 'master':'herdir', 'mastery':'tûr', 'maxim':'linnod', 'May':'lothron', 'mayor':'condir', 'me':'enni', 'meagre':'lhain', 'mean':'faeg', 'meaning':'ind', 'meat':'aes', 'meet':'govad', 'mere':'ael', 'merry':'gelir', 'mesh':'rem', 'met':'govannen', 'metal':'côl', 'middle':'ened', 'mighty':'beleg', 'military':'dírnaith', 'mine':'sabar', 'mirror':'cenedril', 'mist':'hîth', 'mocking':'iaew', 'moisten':'limmid', 'money':'mirian', 'monster':'úan', 'monstrous':'uanui', 'month':'cerveth', 'moon':'cúron', 'morning':'aur', 'mortal':'fair', 'mother':'emel', 'mound':'cerin', 'mount':'amon', 'mountain':'aegas', 'mountaineer':'orodben', 'mouth':'ethir', 'move':'rinc', 'mummy':'nana', 'muscle':'tû', 'mutated':'prestannen', 'mutation':'prestanneth', 'my':'nín', 'myself':'anim', 'nail':'taes', 'naked':'hell', 'name':'eneth', 'narrator':'pethron', 'narrow':'agor', 'neat':'puig', 'neck':'achad',
'necklace':'sigil', 'necromancy':'gûl', 'need':'baur', 'neighbour':'samarad', 'net':'gwî', 'netted':'remmen', 'nettled':'raen', 'new':'cîr', 'news':'siniath', 'night':'daw', 'nightfall':'dû', 'nightingale':'dúlinn', 'nine':'neder', 'ninth':'nedrui', 'no':'al', 'noble':'ara', 'noise':'glam', 'noisy':'brui', 'noose':'nŷw', 'north':'forn', 'northern':'forodren', 'nose':'bund', 'not':'al', 'notion':'inc', 'novel':'sinnarn', 'november':'hithui', 'now':'si', 'number':'gwanod', 'numberless':'arnediad', 'numerous':'rem', 'oak':'doron', 'oath':'gwaedh', 'oblique':'adlant', 'obscure':'doll', 'obstinate':'tarlanc', 'occasion':'lû', 'ocean':'gaearon', 'october':'narbeleth', 'odour':'ûl', 'of':'ned', 'off':'ego', 'old':'brûn', 'older':'iaur', 'on':'bo', 'one':'er', 'open':'edra', 'opening':'dîn', 'oppress':'baugla', 'oppression':'thang', 'oppressive':'baug', 'oppressor':'bauglir', 'or':'egor', 'orc':'glam', 'orient':'amrûn', 'our':'mín', 'out':'ed', 'outcry':'caun', 'outline':'cant', 'over':'or', 'overshadow':'gwathra', 'overwhelming':'taur', 'own':'garn', 'pain':'naeg', 'pale':'gael', 'palisade':'cail', 'pallor':'niphred', 'palm':'camlann', 'parent':'odhril', 'pass':'aglonn', 'passage':'pendrath', 'pasture':'nadhor', 'path':'lond', 'pathway':'bâd', 'pause':'daur', 'peace':'sîdh', 'peak':'aegas', 'pedlar':'bachor', 'pen':'tegil', 'penetrating':'maeg', 'people':'gwaith', 'permission':'dâf', 'petty':'niben', 'pick':'leutha', 'piercing':'maeg', 'pilgrim':'randír', 'pillar':'thafn', 'pillow':'pesseg', 'pimpernel':'elanor', 'pin':'tachol', 'pine':'thôn', 'pinion':'roval', 'pipe':'galenas', 'pippin':'cordof', 'pit':'dath', 'pivot':'pelthaes', 'place':'dôr', 'plain':'lad', 'plane':'talath', 'plank':'pân', 'plant':'aeglos', 'platform':'talan', 'play':'ganna', 'poem':'glaer', 'point':'aeg', 'pointed':'megor', 'poison':'saew', 'pollen':'mâl', 'pondering':'idhren', 'pool':'ael', 'poor':'faeg', 'poplar':'tulus', 'post':'tagol', 'potter':'cennan', 'powder':'mâl', 'power':'balan',
'praise':'egleria', 'precipice':'rhass', 'prick':'eitha', 'prickle':'erch', 'prince':'caun', 'prison':'band', 'private':'said', 'prohibition':'ablad', 'promontory':'naith', 'prone':'dadbenn', 'prop':'tulu', 'property':'garn', 'protect':'beria', 'province':'ardhon', 'puddle':'both', 'puff':'hwest', 'purpose':'thel', 'quarrel':'cost', 'quarter':'canath', 'queen':'bereth', 'quench':'luithia', 'quenching':'luithiad', 'quiet':'tîn', 'quite':'far', 'race':'nûr', 'radiance':'galad', 'radiant':'faen', 'rain':'ross', 'rainbow':'eiliant', 'raise':'ortha', 'range':'lîr', 'ransom':'danwedh', 'rapid':'lagor', 'rat':'nâr', 'rather':'sennui', 'raven':'craban', 'ravine':'cirith', 'readiness':'hûr', 'realm':'ardh', 'reap':'critha', 'rear':'adel', 'recite':'glir', 'reckon':'genedia', 'reckoning':'genediad', 'recount':'trenar', 'red':'born', 'reek':'osp', 'reflection':'galad', 'refusal':'ablad', 'refuser':'avar', 'regiment':'gwaith', 'region':'ardh', 'release':'adleg', 'reluctance':'avad', 'remain':'dar', 'remembrance':'rîn', 'remote':'hae', 'rend':'narcha', 'renewal':'cîl', 'reply':'dangweth', 'repose':'îdh', 'resolve':'thel', 'resonant':'tong', 'respite':'post', 'response':'dambeth', 'rest':'îdh', 'retain':'heb', 'reunion':'aderthad', 'reunite':'adertha', 'reuniting':'aderthad', 'ride':'nor', 'rider':'rochon', 'ridge':'ceber', 'right':'fair', 'rigid':'tharn', 'ringlet':'laws', 'rip':'rista', 'rise':'eria', 'rising':'orthad', 'river':'celon', 'riverbed':'rant', 'road':'men', 'rock':'gond', 'roof':'orthel', 'roofing':'tobas', 'root':'solch', 'rope':'hithlain', 'rose':'meril', 'rotten':'thaw', 'round':'corn', 'row':'lîr', 'royal':'ara', 'ruddy':'crann', 'ruler':'caun', 'ruling':'conui', 'run':'nor', 'rune':'angerthas', 'running':'cell', 'rushing':'alag', 'rustling':'lhoss', 'sad':'dem', 'safe':'band', 'saga':'narn', 'sail':'revia', 'sailor':'cirion', 'salve':'glaew', 'sanctuary':'iaun', 'sand':'lith', 'sapless':'tharn', 'saving':'edraith', 'say':'ped', 'scion':'ion', 'scorn':'eitha',
'scratch':'rhib', 'screen':'esgal', 'sea':'aear', 'seashell':'half', 'season':'echuir', 'seaweed':'gaeruil', 'second':'edwen', 'secret':'thurin', 'see':'cen', 'seed':'cordof', 'seeing':'cened', 'seem':'thia', 'seing':'tírad', 'sensible':'noen', 'separate':'said', 'september':'ivanneth', 'serpent':'lhûg', 'serve':'buia', 'set':'penia', 'seven':'odog', 'seventh':'ochui', 'shade':'gwath', 'shadow':'dae', 'shadowed':'hall', 'shadowy':'gwathren', 'shady':'hall', 'shape':'auth', 'shaped':'cadu', 'shapely':'cadwor', 'shaping':'cannas', 'sharp':'laeg', 'shaven':'paran', 'she':'he', 'shield':'amath', 'shine':'síla', 'ship':'cair', 'shipbuilder':'círdan', 'shipman':'cirion', 'shipwright':'círdan', 'shire':'trann', 'shoe':'habad', 'shore':'esgar', 'short':'estent', 'shortness':'thinnas', 'shout':'can', 'shouting':'glam', 'shudder':'gir', 'shuddering':'girith', 'sick':'caeleb', 'sickle':'cerch', 'sickly':'gem', 'sickness':'cael', 'side':'forvo', 'sight':'cened', 'sign':'andaith', 'silence':'dîn', 'silent':'dínen', 'silmaril':'golovir', 'silver':'celeb', 'simbelmynë':'uilos', 'sin':'úgarth', 'sinew':'tû', 'sing':'glir', 'single':'er', 'sister':'gwathel', 'sit':'haf', 'six':'eneg', 'sixth':'enchui', 'skill':'curu', 'skilled':'maed', 'skin':'flâd', 'sky':'menel', 'slain':'dangen', 'slant':'adlanna', 'slanting':'adlant', 'slave':'mûl', 'slay':'dag', 'slayer':'dagnir', 'slender':'fim', 'slim':'fim', 'sling':'hadlath', 'slipping':'talt', 'slope':'adlanna', 'sloping':'adlann', 'slot':'rein', 'small':'mîw', 'smell':'thost', 'smith':'mírdan', 'smoke':'osp', 'smooth':'paran', 'snake':'lhûg', 'snatch':'ritha', 'snout':'bund', 'snow':'gloss', 'snowdrop':'nínim', 'snowthorn':'aeglos', 'snowy':'lossen', 'soap':'glûdh', 'socket':'taew', 'soft':'moe', 'soil':'cef', 'soiled':'gwaur', 'soldier':'daug', 'sole':'tellen', 'sombre':'dûr', 'somebody':'pen', 'son':'ion', 'song':'aerlinn', 'sorcery':'gûl', 'sound':'nella', 'soup':'salph', 'source':'celu', 'south':'harad', 'southern':'dúven',
'southerners':'haradrim', 'sow':'redh', 'space':'land', 'spark':'geil', 'sparkling':'lim', 'speak':'ped', 'spear':'ecthel', 'spearhead':'naith', 'spearpoint':'aith', 'speech':'glam', 'speed':'hortha', 'spell':'lûth', 'spider':'lhing', 'spike':'caraes', 'spindrift':'gwing', 'spine':'ech', 'spirit':'faer', 'spit':'puia', 'splendour':'aglar', 'split':'thanc', 'sponge':'hwand', 'spoor':'rein', 'sport':'telien', 'spot':'peg', 'spouse':'bereth', 'spray':'gwing', 'spread':'pelia', 'spring':'celu', 'sprout':'tuia', 'spy':'ethir', 'stab':'eitha', 'stain':'gwass', 'stained':'gwaen', 'stairway':'pendrath', 'stake':'ceber', 'stalwart':'thala', 'staple':'taew', 'star':'êl', 'starlight':'gilgalad', 'starry':'elenath', 'stay':'dar', 'staying':'avorn', 'steadfast':'him', 'steady':'thala', 'steep':'baradh', 'stem':'telch', 'stench':'angol', 'stick':'nasta', 'sticky':'hîw', 'stiff':'dorn', 'stiffness':'tarias', 'stink':'thosta', 'stirrup':'talraph', 'stone':'edhelharn', 'stop':'dar', 'stopgap':'gasdil', 'stopped':'tafnen', 'stopper':'dîl', 'stopping':'dîl', 'storm':'alagos', 'straight':'taer', 'strait':'lond', 'stray':'mista', 'straying':'mistad', 'street':'othrad', 'strength':'bellas', 'stroke':'dram', 'strong':'belt', 'stronghold':'ost', 'study':'gûl', 'stuffing':'dîl', 'stunt':'nuitha', 'stunted':'naug', 'sublime':'taur', 'sudden':'bragol', 'suddenness':'breged', 'suffice':'feira', 'sufficient':'far', 'sum':'gonod', 'summer':'laer', 'summit':'taen', 'summon':'toltha', 'sun':'Anor', 'sunlight':'aur', 'sunny':'nórui', 'sunset':'annûn', 'superior':'orchal', 'support':'tulu', 'surface':'palath', 'survival':'bronad', 'survive':'brona', 'swallow':'tuilinn', 'swamped':'loen', 'swan':'alph', 'sward':'parth', 'swart':'baran', 'swarthy':'donn', 'swear':'gwesta', 'sweet':'lend', 'swell':'tuia', 'swift':'celeg', 'swiftly':'lim', 'swooping':'thôr', 'sword':'crist', 'swordsman':'magor', 'syrup':'paich', 'tale':'gwanod', 'tall':'orchal', 'tangled':'remmen', 'task':'tass', 'taut':'tong',
'tear':'nîn', 'tearful':'nîd', 'tell':'nara', 'temptation':'úthaes', 'ten':'cae', 'tenth':'caenen', 'terrify':'gruitha', 'terrifying':'goeol', 'terror':'goe', 'thatch':'taus', 'the':'i', 'thee':'le', 'them':'ti', 'there':'ennas', 'thick':'tûg', 'thin':'lhain', 'thing':'bach', 'third':'nail', 'thirsty':'faug', 'thirtieth':'nelchaenen', 'this':'sen', 'thong':'lath', 'thorn':'êg', 'thought':'ind', 'thoughtful':'idhren', 'thoughtfulness':'idhor', 'thousand':'meneg', 'thrall':'mûl', 'thread':'hithlain', 'three':'nêl', 'threshold':'fen', 'throat':'lanc', 'through':'godref', 'thrower':'hador', 'thrust':'nasta', 'thumb':'atheg', 'thy':'lín', 'tide':'dannen', 'tidings':'siniath', 'tidy':'puig', 'tie':'nod', 'tight':'tong', 'tilted':'adlann', 'time':'anann', 'tiny':'mîw', 'to':'an', 'today':'sír', 'together':'go', 'toil':'muda', 'tomb':'haudh', 'tongue':'lam', 'tooth':'anc', 'top':'caw', 'torment':'baul', 'torrent':'oll', 'tough':'dorn', 'toughness':'tarias', 'towards':'an', 'tower':'barad', 'town':'gobel', 'track':'bâd', 'trade':'banga', 'trample':'batha', 'traverse':'athrada', 'treasure':'mîr', 'treaty':'gowest', 'tree':'brethil', 'trespass':'úgarth', 'tress':'fîn', 'tressure':'cathrae', 'triangle':'naith', 'trick':'rinc', 'trill':'glir', 'triumph':'gell', 'triumphant':'gellui', 'troll':'torog', 'troop':'gwaith', 'troth':'gwaedh', 'trouble':'presta', 'true':'thand', 'trumpet':'rom', 'trust':'estel', 'trusty':'tolog', 'tune':'lind', 'tuneful':'lend', 'tunnel':'groth', 'turf':'sâdh', 'twelve':'imp', 'twilight':'tinnu', 'twin':'gwanûn', 'twirl':'hwinia', 'twirling':'hwind', 'twisted':'norn', 'twitch':'rinc', 'two':'tâd', 'tyrannous':'baug', 'tyrant':'bauglir', 'under':'di', 'understand':'henia', 'understanding':'hannas', 'union':'erthad', 'unique':'minai', 'unite':'ertha', 'uniting':'erthad', 'unquenchable':'uluithiad', 'untamed':'rhaw', 'up':'am', 'uphill':'ambenn', 'upon':'am', 'uproar':'glam', 'upwards':'am', 'urge':'hortha', 'us':'ammen', 'use':'iuith', 'useful':'maer',
'vague':'hethu', 'vale':'im', 'valley':'imlad', 'valour':'caun', 'vassal':'bôr', 'vast':'taur', 'veil':'esgal', 'veiled':'hall', 'vein':'rant', 'vengeance':'acharn', 'verse':'ann-thennath', 'vessel':'calph', 'victory':'tûr', 'vigilance':'tirith', 'vigour':'gorf', 'village':'gobel', 'vine':'gwîn', 'violence':'breged', 'violent':'asgar', 'virgin':'rodwen', 'virginity':'gweneth', 'viscous':'hîw', 'voice':'conath', 'void':'cofn', 'wain':'rach', 'wait':'dar', 'walk':'pada', 'wall':'ram', 'wander':'revia', 'wanderer':'randír', 'wandering':'mist', 'war':'auth', 'ware':'bach', 'warm':'laug', 'warrior':'daug', 'wash':'iôl', 'waste':'eru', 'watch':'tir', 'watcher':'tirn', 'water':'lorn', 'waterfall':'lanthir', 'waterland':'nen', 'watery':'nend', 'way':'athrad', 'weary':'lom', 'weaver':'nathron', 'web':'gwî', 'webster':'nathron', 'wedge':'naith', 'weed':'galenas', 'week':'lefnar', 'weeping':'nîr', 'well':'eithel', 'werewolf':'gaur', 'west':'annûn', 'western':'annui', 'westmansweed':'galenas', 'westron':'annúnaid', 'wet':'limp', 'what':'man', 'when':'ir', 'whirl':'hwinia', 'whirling':'hwind', 'whisper':'lhoss', 'white':'brassen', 'whiten':'nimmid', 'who':'ai', 'wicked':'ogol', 'wide':'land', 'wield':'maetha', 'wife':'bess', 'wild':'braig', 'wilderness':'gwaith', 'will':'innas', 'willow':'tathar', 'wily':'coru', 'wind':'gwaew', 'window':'henneth', 'windy':'gwaeren', 'wine':'gwîn', 'wing':'rafn', 'winter':'rhîw', 'wise':'goll', 'wish':'iest', 'with':'ah', 'withered':'tharn', 'withering':'peleth', 'without':'ar', 'wizard':'curunír', 'woe':'naeth', 'wolf':'draug', 'woman':'adaneth', 'wood':'eryn', 'wooden':'tawaren', 'woodpecker':'tavor', 'wool':'taw', 'woollen':'taw', 'word':'peth', 'world':'ardhon', 'worn':'gern', 'wose':'drû', 'wound':'harna', 'wounded':'harn', 'woven':'remmen', 'wreath':'rî', 'wright':'thavron', 'wrist':'molif', 'write':'teitha', 'wrong':'neitha', 'wronged':'neithan', 'yard':'sant', 'year':'ennin', 'yelling':'glam', 'yellow':'malen', 'yoke':'ianu',
'young':'neth', 'youth':'nîth',
}
"""Static values for one way import.""" SUPPORTED_FORMATS = (".svg", ".jpeg", ".jpg", ".png", ".tiff", ".tif") HTML_LINK = '<link rel="{rel}" type="{type}" href="{href}" />' ICON_TYPES = ( {"image_fmt": "ico", "rel": None, "dimensions": (64, 64), "prefix": "favicon"}, {"image_fmt": "png", "rel": "icon", "dimensions": (16, 16), "prefix": "favicon"}, {"image_fmt": "png", "rel": "icon", "dimensions": (32, 32), "prefix": "favicon"}, {"image_fmt": "png", "rel": "icon", "dimensions": (64, 64), "prefix": "favicon"}, {"image_fmt": "png", "rel": "icon", "dimensions": (96, 96), "prefix": "favicon"}, {"image_fmt": "png", "rel": "icon", "dimensions": (180, 180), "prefix": "favicon"}, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (57, 57), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (60, 60), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (72, 72), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (76, 76), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (114, 114), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (120, 120), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (144, 144), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (152, 152), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (167, 167), "prefix": "apple-touch-icon", }, { "image_fmt": "png", "rel": "apple-touch-icon", "dimensions": (180, 180), "prefix": "apple-touch-icon", }, {"image_fmt": "png", "rel": None, "dimensions": (70, 70), "prefix": "mstile"}, {"image_fmt": "png", "rel": None, "dimensions": (270, 270), "prefix": "mstile"}, {"image_fmt": "png", "rel": None, "dimensions": (310, 310), "prefix": "mstile"}, 
{"image_fmt": "png", "rel": None, "dimensions": (310, 150), "prefix": "mstile"}, {"image_fmt": "png", "rel": "shortcut icon", "dimensions": (196, 196), "prefix": "favicon"}, )
def findLongestSubSeq(str): n = len(str) dp = [[0 for k in range(n+1)] for l in range(n+1)] for i in range(1, n+1): for j in range(1, n+1): # If characters match and indices are not same if (str[i-1] == str[j-1] and i != j): dp[i][j] = 1 + dp[i-1][j-1] # If characters do not match else: dp[i][j] = max(dp[i][j-1], dp[i-1][j]) return dp[n][n]
""" [8/8/2012] Challenge #86 [easy] (run-length encoding) https://www.reddit.com/r/dailyprogrammer/comments/xxbbo/882012_challenge_86_easy_runlength_encoding/ Run-Length encoding is a simple form of compression that detects 'runs' of repeated instances of a symbol in a string and compresses them to a list of pairs of 'symbol' 'length'. For example, the string "Heeeeelllllooooo nurse!" Could be compressed using run-length encoding to the list of pairs [(1,'H'),(5,'e'),(5,'l'),(5,'o'),(1,'n'),(1,'u'),(1,'r'),(1,'s'),(1,'e')] Which seems to not be compressed, but if you represent it as an array of 18bytes (each pair is 2 bytes), then we save 5 bytes of space compressing this string. Write a function that takes in a string and returns a run-length-encoding of that string. (either as a list of pairs or as a 2-byte-per pair array) BONUS: Write a decompression function that takes in the RLE representation and returns the original string """ def main(): pass if __name__ == "__main__": main()
# Beispielprogramm für das Buch "Python Challenge" # # Copyright 2020 by Michael Inden def swap(values, first, second): value1 = values[first] value2 = values[second] values[first] = value2 values[second] = value1 def swap(values, first, second): tmp = values[first] values[first] = values[second] values[second] = tmp def find(values, search_for): for i in range(len(values)): if values[i] == search_for: return i return -1 def find(values, search_for): pos = 0 while pos < len(values) and not values[pos] == search_for: pos += 1 # i >= len(values) or values[i] == searchFor return -1 if pos >= len(values) else pos def find_with_enumerate(values, search_for): for i, value in enumerate(values): if value == search_for: return i return -1 def main(): pass if __name__ == "__main__": main()
@singleton class Database: def __init__(self): print('Loading database')
#!/usr/bin/env python3 def get_case_data(): return [int(i) for i in input().split()] # Using recursive implementation def get_gcd(a, b): return get_gcd(b, a % b) if b != 0 else a def print_number_or_ok_if_equals(number, guess): print("OK" if number == guess else number) number_of_cases = int(input()) for case in range(number_of_cases): first_integer, second_integer, proposed_gcd = get_case_data() real_gcd = get_gcd(first_integer, second_integer) print_number_or_ok_if_equals(real_gcd, proposed_gcd)
ACCOUNT_TYPES = [ 'Cuenta de ahorro', 'Cuenta vista', 'Cuenta corriente', 'Cuenta rut', ] BANK_NAMES = [ 'BANCO DE CHILE/EDWARDS CITI', 'BANCO ESTADO', 'SCOTIABANK', 'BCI', 'CORPBANCA', 'BICE', 'HSBC', 'SANTANDER', 'ITAU', 'THE BANK OF TOKYO-MITSUBISHI LTD.', 'SECURITY', 'BBVA', 'DEL DESARROLLO', 'FALABELLA', 'RIPLEY', 'BANCO CONSORCIO', 'BANCO PARIS', 'COOPEUCH', 'INTERNACIONAL', ] # BANK_NAMES2 = [ # 'ABN AMRO BANK (CHILE)', # 'BANCO BICE', # 'BANCO DE CHILE / EDWARDS', # 'BANCO DE CREDITO E INVERSIONES', # 'BANCO DEL DESARROLLO', # 'BANCO DEL ESTADO DE CHILE', # 'BANCO FALABELLA', # 'BANCO INTERNACIONAL', # 'BANCO ITAU CHILE', # 'BANCO RIPLEY', # 'BANCO SANTANDER-CHILE', # 'BANCO SECURITY', # 'CORPBANCA', # 'Caja de compensación Los Héroes', # 'Coopeuch (Cooperativa de Ahorro y Crédito)', # 'HNS BANCO', # 'SCOTIABANK', # ] RUT_LOWER_RANGE = 1000000 RUT_UPPER_RANGE = 22000000 ACCOUNT_NUMBER_LOWER_RANGE = 1000000 ACCOUNT_NUMBER_UPPER_RANGE = 99999999999999999999 BOOLEANS = [True, False] RUT_MESSAGES = ['', 'rut', 'run'] ACCOUNT_NUMBER_MESSAGES = ['', 'numero de cuenta', 'n', 'n˚ cuenta', 'n˚', 'cuenta'] BANK_NAME_MESSAGES = ['', 'banco'] ACCOUNT_TYPES_MESSAGES = ['', 'cuenta'] NAMES_OPTIONS = ['name/lastname', 'full-name'] BANK_OPTIONS = ['name/type', 'name-type'] ACCOUNT_TYPES_CARACTERS = ['', ' ', '-'] END_LINE = ['\n', ', ', ' ']
# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def verticalTraversal(self, root: TreeNode) -> List[List[int]]: res = defaultdict(list) q = [(root, 0)] min_col = max_col = 0 while q: q.sort(key=lambda x: (x[1], x[0].val)) min_col = min(min_col, q[0][1]) max_col = max(max_col, q[-1][1]) prev_q, q = q, [] for node, col in prev_q: res[col].append(node.val) if node.left: q.append((node.left, col - 1)) if node.right: q.append((node.right, col + 1)) return [res[col] for col in range(min_col, max_col+1)]
# -*- coding: utf-8 -*- def main(): s = input() mod = '' for i in range(3): if s[i] == '1': mod += '9' elif s[i] == '9': mod += '1' print(mod) if __name__ == '__main__': main()
# Añadimos el template / clase generica class SportInfo: """Mostrar información de un deporte""" def __init__(self, name): print("Estamos creando un objeto") self.name = name def __del__(self): print("Estamos destruyendo un objeto: {}" .format(self.__class__.__name__)) # Añadimos los objetos asociados a esa clase run = SportInfo("Correr") print(run.name) # Correr print(run.__doc__) del run swim = SportInfo("Nadar") print(swim.name) # "" print(swim.__doc__) del swim print("Final")
# HyperLogLog - вероятностная структура данных для подсчёта количества уникальных элементов class HyperLogLog: def __init__(self, size): # size=32 self.buckets = [0] * size def fill_buckets(self, item): bin_hash = self.hash_value(item, length=32) buckets_index = int(str(bin_hash[:5]), 10) # берём первые 5 элементов хэша и переводим её в десятичное счисление total_zeros = self.count_zeros(bin_hash) if total_zeros > self.buckets[buckets_index]: # если количество нулей с конца больше, # чем то, что лежит в бакете по индексу, полученному от перевода части хэша в дестичную систему self.buckets[buckets_index] = total_zeros # заполняем бакет по индексу return self.buckets # получаем хэш в бинарном виде заданной длины при помощи ASCII кодов @classmethod def hash_value(cls, item, length): hashed = 0 for i, s in enumerate(item): encoded = (i + 1) * ord(s) hashed += encoded max_number = 2 ** length - 1 hashed = hashed % max_number bin_hash = bin(hashed)[2:] bin_hash = '0' * (length - len(bin_hash)) + bin_hash return bin_hash # считаем количество нулей в хэшэ с конца до тех пор, пока не встретится единица @classmethod def count_zeros(cls, bin_hash): total_zeros = 0 for i in range(len(bin_hash) - 1, -1, -1): if bin_hash[i] == 0: total_zeros += 1 else: break return total_zeros # считаем количество уникальных элементов - гармоническое среднее с поправкой на смещение @staticmethod def get_cardinality(buckets_filled): bias = 0.7942 amount = 0 for value in buckets_filled: amount += 1 / 2 ** -value cardinality = bias * len(buckets_filled) * (len(buckets_filled) / amount) return cardinality
class Node: def __init__(self,data): self.data=data self.next=None arr=[5,8,20] brr=[4,11,15] #inserting elements in first list list1=Node(arr[0]) root1=list1 for i in arr[1::]: temp=Node(i) list1.next=temp list1=list1.next #inserting elements in second list list2=Node(brr[0]) root2=list2 for i in brr[1::]: temp=Node(i) list2.next=temp list2=list2.next newlist=[] while(root1!=None and root2!=None): if(root1.data<root2.data): newlist.append(root1.data) root1=root1.next else: newlist.append(root2.data) root2=root2.next if(root1==None): if(root2==None): print(newlist) else: while(root2!=None): newlist.append(root2.data) root2=root2.next elif(root2==None): if(root1==None): print(newlist) else: while(root1!=None): newlist.append(root1.data) root1=root1.next print(newlist)
# Write your solution for 1.4 here! def is_prime(x): if x > 1: for i in range(2,x): if (x % i) == 0: print(x,"is not a prime number") print(i,"times",x//i,"is",x) else: print(x,"is not a prime number") is_prime(5)
class PluginMount(type): """Generic plugin mount point (= entry point) for pydifact plugins. .. note:: Plugins that have an **__omitted__** attriute are not added to the list! """ # thanks to Marty Alchin! def __init__(cls, name, bases, attrs): if not hasattr(cls, "plugins"): cls.plugins = [] else: if not getattr(cls, "__omitted__", False): cls.plugins.append(cls) class EDISyntaxError(SyntaxError): """A Syntax error within the parsed EDIFACT file was found."""
if __name__ == "__main__": print((lambda x,r : [r:=r+1 for i in x.split('\n\n') if all(map(lambda x : x in i,['byr','iyr','eyr','hgt','hcl','ecl','pid']))][-1])(open("i").read(),0)) def main_debug(inp): # 204 inp = inp.split('\n\n') rep = 0 for i in inp: if all(map(lambda x : x in i,['byr','iyr','eyr','hgt','hcl','ecl','pid'])): rep += 1 return rep
def flatten(iterable, result=None): if result == None: result = [] for it in iterable: if type(it) in (list, set, tuple): flatten(it, result) else: result.append(it) return [i for i in result if i is not None]
def palindromo(palavra: str) -> bool: if len(palavra) <= 1: return True primeira_letra = palavra[0] ultima_letra = palavra[-1] if primeira_letra != ultima_letra: return False return palindromo(palavra[1:-1]) nome_do_arquivo = input('Digite o nome do entrada de entrada: ') with open(nome_do_arquivo, 'r', encoding='utf8') as arquivo: for linha in arquivo: linha = linha.strip() palavras = linha.split() for palavra in palavras: if palindromo(palavra): print(palavra) # print(eh_primo('ama')) # print(eh_primo('socorrammesubinoonibusemmarroco'))
def main(): isNumber = False while not isNumber: try: size = int(input('Height: ')) if size > 0 and size <= 8: isNumber = True break except ValueError: isNumber = False build(size, size) def build(size, counter): spaces = size - 1 if size == 0: return 1 else: print(' ' * spaces, end='') print('#' * (counter - spaces), end=' ') print('#' * (counter - spaces), end='\n') return build(size - 1, counter) main()
#URLs
ROOTURL = 'https://www.reuters.com/companies/'
FXRATESURL = 'https://www.reuters.com/markets/currencies'

#ADDURLs — path suffixes appended to ROOTURL + ticker
INCSTAT_ANN_URL = '/financials/income-statement-annual/'
INCSTAT_QRT_URL = '/financials/income-statement-quarterly/'
BS_ANN_URL = '/financials/balance-sheet-annual/'
BS_QRT_URL = '/financials/balance-sheet-quarterly/'
KEYMETRICS_URL = '/key-metrics/'

#TABLENAMES — database table names for each scraped page
STOCKDATA = 'stockdata'
FXRATES = 'fxrates'
INCSTAT_ANN = 'incstat_ann'
INCSTAT_QRT = 'incstat_qrt'
BS_ANN = 'bs_ann'
BS_QRT = 'bs_qrt'
KEYMETRICS = 'km'

#TIMES — reporting periods covered by the scrape
YEARS = ['2015', '2016', '2017', '2018', '2019']
QRTS = ['2019Q2', '2019Q3', '2019Q4', '2020Q1', '2020Q2']

#DICTIONARIES
# Map each page-path suffix to the table its data lands in.
ADDURLS_TO_TABLENAMES = { INCSTAT_ANN_URL: INCSTAT_ANN,\
    INCSTAT_QRT_URL:INCSTAT_QRT,\
    BS_ANN_URL: BS_ANN,\
    BS_QRT_URL: BS_QRT, \
    KEYMETRICS_URL: KEYMETRICS}

# Per table: the line items to extract, each mapped to [default, dtype].
TABLENAMES_TO_DATA = { INCSTAT_ANN: { 'Total Revenue' :[0, 'int64'],\
    'Net Income' : [0, 'int64']},\
    INCSTAT_QRT:{ 'Total Revenue': [0, 'int64'],\
    'Net Income': [0, 'int64']},\
    BS_ANN: { 'Total Equity' : [0, 'int64'],\
    'Total Liabilities' : [0, 'int64']},\
    BS_QRT: { 'Total Equity' : [0, 'int64'],\
    'Total Liabilities' : [0, 'int64']}, \
    KEYMETRICS: { 'Dividend (Per Share Annual)' : [0, 'float64' ],\
    'Free Cash Flow (Per Share TTM)' : [0, 'float64'],\
    'Current Ratio (Annual)' : [0, 'float64']} }

# Scale factors for figures reported in millions/thousands.
FACTORS = { 'Mil': 1000000, 'Thousands': 1000}

COLUMNHEADERSDICT_ANN = { "Unnamed: 0":"Item", \
    "Unnamed: 1":YEARS[-1],\
    "Unnamed: 2":YEARS[-2],\
    "Unnamed: 3":YEARS[-3],\
    "Unnamed: 4":YEARS[-4],\
    "Unnamed: 5":YEARS[-5], \
    0:"Item", \
    1: YEARS[-1]} # Unnamed for financials - 0,1 for non-financials

COLUMNHEADERSDICT_QRT = { "Unnamed: 0":"Item", \
    "Unnamed: 1":QRTS[-1],\
    "Unnamed: 2":QRTS[-2],\
    "Unnamed: 3":QRTS[-3],\
    "Unnamed: 4":QRTS[-4],\
    "Unnamed: 5":QRTS[-5], \
    0:"Item", \
    1: YEARS[-1]} # Unnamed for financials - 0,1 for non-financials

# ISIN country prefix -> country name.
ISIN_TO_COUNTRIES = { 'US' : 'USA',
    'DE' : 'Germany',
    'GB' : 'UK',
    'NL' : 'Netherlands',
    'IE' : 'Ireland',
    'FR' : 'France',
    'CA' : 'Canada',
    'CH' : 'Switzerland' }

#PATHS — pipeline file locations (Windows-style relative paths)
RICSCSVPATH = "..\\data\\01_raw\\reuters-shorts.csv"
RAWDATAPATH = "..\\data\\01_raw\\rawdatadb.db"
INTDATAPATH = "..\\data\\02_intermediate\\intdatadb.db"
PROCDATAPATH = '..\\data\\03_processed\\processeddata.feather'
PROCDATAPATHCSV = '..\\data\\03_processed\\processeddata.csv'

CURRENCIES = ['USD', 'EUR', 'GBP','CHF', 'INR']
DEFAULT_PORT = 9000 DEFAULT_SECURE_PORT = 9440 DBMS_MIN_REVISION_WITH_TEMPORARY_TABLES = 50264 DBMS_MIN_REVISION_WITH_TOTAL_ROWS_IN_PROGRESS = 51554 DBMS_MIN_REVISION_WITH_BLOCK_INFO = 51903 # Legacy above. DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032 DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058 DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060 DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372 DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401 DBMS_MIN_REVISION_WITH_SERVER_LOGS = 54406 DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA = 54410 DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420 DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429 # Timeouts DBMS_DEFAULT_CONNECT_TIMEOUT_SEC = 10 DBMS_DEFAULT_TIMEOUT_SEC = 300 DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC = 5 DEFAULT_COMPRESS_BLOCK_SIZE = 1048576 DEFAULT_INSERT_BLOCK_SIZE = 1048576 DBMS_NAME = 'ClickHouse' CLIENT_NAME = 'python-driver' CLIENT_VERSION_MAJOR = 18 CLIENT_VERSION_MINOR = 10 CLIENT_VERSION_PATCH = 3 CLIENT_REVISION = 54429 BUFFER_SIZE = 1048576 STRINGS_ENCODING = 'utf-8'
class Solution(object): def maxProfit(self, prices): """ :type prices: List[int] :rtype: int """ res=0 prev=None for x in prices: res += x-prev if prev!=None and prev<x else 0 prev = x return res
input_file = open("input.txt", "r") entriesArray = input_file.read().split("\n") depth_measure_increase = 0 for i in range(3, len(entriesArray), 1): first_window = int(entriesArray[i-1]) + int(entriesArray[i-2]) + int(entriesArray[i-3]) second_window = int(entriesArray[i]) + int(entriesArray[i-1]) + int(entriesArray[i-2]) if second_window > first_window: depth_measure_increase += 1 print(f'{depth_measure_increase=}')
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved. # # Universal Power System Controller # USAID Middle East Water Security Initiative # # Developed by: Nathan Webster # Primary Investigator: Nathan Johnson # # Version History (mm_dd_yyyy) # 1.00 07_13_2018_NW # ###################################################### # VFD register values for reading and writing reg = { "WriteFunc":{ "Frequency_Set":269, "Frequency_Max":267, "Frequency_Min":268, "Frequency_Acc":270, "Motor_Start_Stop":8192 }, "ReadFunc":{ "Output_Frequency":4096, "Output_Voltage":4097, "Output_Current":4098, "Output_Speed":4119, "Output_Power":4120,#4106 "Bus_Voltage":4100, "Frequency_Set":269, "Temperature":4103 } } ''' VFD modbus value multipliers Freq = 100 Current = 100 Voltage = Actual Power = 10 '''
class Solution: def reverseString(self, s: List[str]) -> None: """ Do not return anything, modify s in-place instead. """ l , r = 0 , len(s)-1 while l<r: s[l] , s[r] = s[r] , s[l] l +=1 r -=1 return s