Dataset columns (name: dtype, value or length range):

hexsha: stringlengths, 40 to 40
size: int64, 1 to 1.03M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 239
max_stars_repo_name: stringlengths, 5 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 239
max_issues_repo_name: stringlengths, 5 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 239
max_forks_repo_name: stringlengths, 5 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 1 to 1.03M
avg_line_length: float64, 1 to 958k
max_line_length: int64, 1 to 1.03M
alphanum_fraction: float64, 0 to 1
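The listing above is the flattened column schema for the per-file records that follow. As a hedged illustration only, the record layout can be written out as a Python TypedDict; the mapping of the dump's dtype names to Python types is an assumption, and the class name RawCodeRecord is invented for this sketch.

from typing import List, Optional, TypedDict


class RawCodeRecord(TypedDict):
    """One row of the dump above; field names mirror the listed columns.

    Assumed type mapping: stringlengths -> str, stringclasses -> str,
    listlengths -> List[str], int64 -> int, float64 -> float. The count and
    datetime fields appear as null in many rows below, hence Optional.
    """
    hexsha: str                                          # 40-char content hash
    size: int                                            # file size in bytes
    ext: str                                             # file extension (10 distinct values)
    lang: str                                            # language (single value)
    max_stars_repo_path: str
    max_stars_repo_name: str
    max_stars_repo_head_hexsha: str
    max_stars_repo_licenses: List[str]
    max_stars_count: Optional[int]
    max_stars_repo_stars_event_min_datetime: Optional[str]
    max_stars_repo_stars_event_max_datetime: Optional[str]
    # the max_issues_* and max_forks_* column groups repeat the same pattern
    content: str                                         # the file text itself
    avg_line_length: float
    max_line_length: int
    alphanum_fraction: float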
acedab69b719bdf0d8a55a9eda641eda65e239ea
1,292
py
Python
scrapy_vietcorpus/vietcorpus/spiders/search_results_spider.py
garfieldnate/vi_experiments
43565130531a9d4fef5e9e90dd0d6cee56613622
[ "Apache-2.0" ]
1
2017-11-15T01:03:14.000Z
2017-11-15T01:03:14.000Z
scrapy_vietcorpus/vietcorpus/spiders/search_results_spider.py
garfieldnate/vi_experiments
43565130531a9d4fef5e9e90dd0d6cee56613622
[ "Apache-2.0" ]
null
null
null
scrapy_vietcorpus/vietcorpus/spiders/search_results_spider.py
garfieldnate/vi_experiments
43565130531a9d4fef5e9e90dd0d6cee56613622
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import datetime
from os import path
from urllib.parse import urlencode, urlparse, parse_qs

import scrapy
import redis

from vietcorpus.items import SearchResultItem

redis = redis.Redis()
dir_path = path.dirname(path.realpath(__file__))
duckduckgo_url = "https://duckduckgo.com/html/?"


class SearchResultsSpider(scrapy.Spider):
    name = 'duckduckgo_seeds'
    allowed_domains = ['duckduckgo']

    query_file = path.realpath(f"{dir_path}/../../../web_corpus/wiki_keywords/queries_30000.txt")
    with open(query_file) as f:
        queries = f.read().splitlines()
    start_urls = [duckduckgo_url + urlencode({'q': q,
                                              'kp': -2,  # no safe search
                                              'kd': -2   # don't use redirect URLs
                                              }) for q in queries]
    print(f"Created URLs from {query_file}; example formatted URL: {start_urls[0]}")

    def parse(self, response):
        for rank, result_selector in enumerate(response.css("#links .result__a").xpath('@href')):
            item = SearchResultItem()
            item['query'] = parse_qs(urlparse(response.url).query)['q'][0]
            item['rank'] = rank
            item['result_url'] = result_selector.extract()
            item['date'] = datetime.datetime.now()
            item['search_url'] = response.url
            yield item
31.512195
97
0.649381
acedab7195b80d82886fdbf695fcbb26550daf04
109
py
Python
0/functional_programming.py
JacobFV/Computatrum
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
[ "MIT" ]
null
null
null
0/functional_programming.py
JacobFV/Computatrum
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
[ "MIT" ]
null
null
null
0/functional_programming.py
JacobFV/Computatrum
6b9c324f4e0e73e8d7af79bb7785d0e86d26bc31
[ "MIT" ]
null
null
null
def sum_list(list, func):
    sum = 0
    for item in list:
        sum += func(item)
    return sum
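For clarity, a minimal usage example of the snippet above; the call below is illustrative and not part of the original file.

# Sums func(item) over the list: 1 + 4 + 9 -> 14
print(sum_list([1, 2, 3], lambda x: x * x))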
21.8
26
0.541284
acedac25b64d4eeedb6482aa4e3c7b5fa82a249a
5,772
py
Python
pix2pixHD/train.py
vcarehuman/YoloPose
e4c63b75a0b7abaeb649d392f5fd601c8b037034
[ "BSD-3-Clause" ]
null
null
null
pix2pixHD/train.py
vcarehuman/YoloPose
e4c63b75a0b7abaeb649d392f5fd601c8b037034
[ "BSD-3-Clause" ]
null
null
null
pix2pixHD/train.py
vcarehuman/YoloPose
e4c63b75a0b7abaeb649d392f5fd601c8b037034
[ "BSD-3-Clause" ]
null
null
null
import time
import os
import numpy as np
import torch
from torch.autograd import Variable
from collections import OrderedDict
from subprocess import call
import fractions
def lcm(a,b): return abs(a * b)/fractions.gcd(a,b) if a and b else 0

from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer

opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
    try:
        start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
    except:
        start_epoch, epoch_iter = 1, 0
    print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
    start_epoch, epoch_iter = 1, 0

opt.print_freq = lcm(opt.print_freq, opt.batchSize)
if opt.debug:
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)
if opt.fp16:
    from apex import amp
    model, [optimizer_G, optimizer_D] = amp.initialize(model, [model.optimizer_G, model.optimizer_D], opt_level='O1')
    model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
else:
    optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D

total_steps = (start_epoch-1) * dataset_size + epoch_iter

display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq

for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    if epoch != start_epoch:
        epoch_iter = epoch_iter % dataset_size
    for i, data in enumerate(dataset, start=epoch_iter):
        if total_steps % opt.print_freq == print_delta:
            iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize

        # whether to collect output images
        save_fake = total_steps % opt.display_freq == display_delta

        ############## Forward Pass ######################
        losses, generated = model(Variable(data['label']), Variable(data['inst']),
                                  Variable(data['image']), Variable(data['feat']), infer=save_fake)

        # sum per device losses
        losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
        loss_dict = dict(zip(model.module.loss_names, losses))

        # calculate final loss scalar
        loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
        loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0)

        ############### Backward Pass ####################
        # update generator weights
        optimizer_G.zero_grad()
        if opt.fp16:
            with amp.scale_loss(loss_G, optimizer_G) as scaled_loss: scaled_loss.backward()
        else:
            loss_G.backward()
        optimizer_G.step()

        # update discriminator weights
        optimizer_D.zero_grad()
        if opt.fp16:
            with amp.scale_loss(loss_D, optimizer_D) as scaled_loss: scaled_loss.backward()
        else:
            loss_D.backward()
        optimizer_D.step()

        ############## Display results and errors ##########
        ### print out errors
        if total_steps % opt.print_freq == print_delta:
            errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
            t = (time.time() - iter_start_time) / opt.print_freq
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
            visualizer.plot_current_errors(errors, total_steps)
            #call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])

        ### display output images
        if save_fake:
            visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
                                   ('synthesized_image', util.tensor2im(generated.data[0])),
                                   ('real_image', util.tensor2im(data['image'][0]))])
            visualizer.display_current_results(visuals, epoch, total_steps)

        ### save latest model
        if total_steps % opt.save_latest_freq == save_delta:
            print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
            model.module.save('latest')
            np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')

        if epoch_iter >= dataset_size:
            break

    # end of epoch
    iter_end_time = time.time()
    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))

    ### save model for this epoch
    if epoch % opt.save_epoch_freq == 0:
        print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
        model.module.save('latest')
        model.module.save(epoch)
        np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')

    ### instead of only training the local enhancer, train the entire network after certain iterations
    if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
        model.module.update_fixed_params()

    ### linearly decay learning rate after certain iterations
    if epoch > opt.niter:
        model.module.update_learning_rate()
40.647887
130
0.626473
acedad5e14cac1b236d6dd960e3aa701fb7c2e95
1,202
py
Python
utils/src/basic_operations.py
mjaglarz/BA-thesis
914c2615e368014a3a4dc4074f1419944f523828
[ "MIT" ]
1
2021-03-14T09:55:21.000Z
2021-03-14T09:55:21.000Z
utils/src/basic_operations.py
mjaglarz/BA-thesis
914c2615e368014a3a4dc4074f1419944f523828
[ "MIT" ]
null
null
null
utils/src/basic_operations.py
mjaglarz/BA-thesis
914c2615e368014a3a4dc4074f1419944f523828
[ "MIT" ]
null
null
null
import os
import utils


def run(fd):
    ntests = 3
    nincr_small = 1000
    nincr_big = 10000000

    utils.test_sleep(fd, ntests)
    utils.test_for_sleep(fd, ntests)

    utils.test_add(fd, ntests, nincr_small)
    utils.test_add(fd, ntests, nincr_big)
    utils.test_add_if(fd, ntests, nincr_small)
    utils.test_add_if(fd, ntests, nincr_big)

    utils.test_subtract(fd, ntests, nincr_small)
    utils.test_subtract(fd, ntests, nincr_big)
    utils.test_subtract_if(fd, ntests, nincr_small)
    utils.test_subtract_if(fd, ntests, nincr_big)

    utils.test_multiply(fd, ntests, nincr_small)
    utils.test_multiply(fd, ntests, nincr_big)
    utils.test_multiply_if(fd, ntests, nincr_small)
    utils.test_multiply_if(fd, ntests, nincr_big)

    utils.test_divide(fd, ntests, nincr_small)
    utils.test_divide(fd, ntests, nincr_big)
    utils.test_divide_if(fd, ntests, nincr_small)
    utils.test_divide_if(fd, ntests, nincr_big)


if __name__ == "__main__":
    file_name = '/results/basic_operations/python_basic_operations.txt'
    path = utils.chop_suffix_from_path(os.path.dirname(os.path.abspath(__file__)), '/utils/src') + file_name
    with open(path, 'w') as fd:
        run(fd)
27.953488
108
0.721298
acedae8f7fc5c751194a3ad9f57caca4cea3aeb8
9,717
py
Python
misc/acrn-config/hv_config/hv_item.py
stanleyintel/acrn-hypervisor
0461ac209f5f265c269b6d77415043b6f028598b
[ "BSD-3-Clause" ]
null
null
null
misc/acrn-config/hv_config/hv_item.py
stanleyintel/acrn-hypervisor
0461ac209f5f265c269b6d77415043b6f028598b
[ "BSD-3-Clause" ]
null
null
null
misc/acrn-config/hv_config/hv_item.py
stanleyintel/acrn-hypervisor
0461ac209f5f265c269b6d77415043b6f028598b
[ "BSD-3-Clause" ]
null
null
null
# Copyright (C) 2020 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

import common
import hv_cfg_lib


class LogLevel:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.npk = 0
        self.mem = 0
        self.console = 0

    def get_info(self):
        self.npk = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "NPK_LOGLEVEL")
        self.mem = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "MEM_LOGLEVEL")
        self.console = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "CONSOLE_LOGLEVEL")

    def check_item(self):
        hv_cfg_lib.hv_range_check(self.npk, "DEBUG_OPTIONS", "NPK_LOGLEVEL", hv_cfg_lib.RANGE_DB['LOG_LEVEL'])
        hv_cfg_lib.hv_range_check(self.mem, "DEBUG_OPTIONS", "MEM_LOGLEVEL", hv_cfg_lib.RANGE_DB['LOG_LEVEL'])
        hv_cfg_lib.hv_range_check(self.console, "DEBUG_OPTIONS", "CONSOLE_LOGLEVEL", hv_cfg_lib.RANGE_DB['LOG_LEVEL'])


class LogOpt:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.dest = 0
        self.release = ''
        self.buf_size = 0
        self.level = LogLevel(self.hv_file)

    def get_info(self):
        self.release = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "RELEASE")
        self.dest = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "LOG_DESTINATION")
        self.buf_size = common.get_hv_item_tag(self.hv_file, "DEBUG_OPTIONS", "LOG_BUF_SIZE")
        self.level.get_info()

    def check_item(self):
        hv_cfg_lib.release_check(self.release, "DEBUG_OPTIONS", "RELEASE")
        hv_cfg_lib.hv_range_check(self.dest, "DEBUG_OPTIONS", "LOG_DESTINATION", hv_cfg_lib.RANGE_DB['LOG_DESTINATION_BITMAP'])
        hv_cfg_lib.hv_size_check(self.buf_size, "DEBUG_OPTIONS", "LOG_BUF_SIZE")
        self.level.check_item()


class CapHv:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.max_emu_mmio_regions = 0
        self.max_pt_irq_entries = 0
        self.max_ioapic_num = 0
        self.max_ioapic_lines = 0
        self.max_ir_entries = 0
        self.iommu_bus_num = 0
        self.max_pci_dev_num = 0
        self.max_msix_table_num = 0

    def get_info(self):
        self.max_emu_mmio_regions = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_EMULATED_MMIO")
        self.max_pt_irq_entries = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_PT_IRQ_ENTRIES")
        self.max_ioapic_num = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_IOAPIC_NUM")
        self.max_ioapic_lines = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_IOAPIC_LINES")
        self.max_ir_entries = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_IR_ENTRIES")
        self.iommu_bus_num = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "IOMMU_BUS_NUM")
        self.max_pci_dev_num = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_PCI_DEV_NUM")
        self.max_msix_table_num = common.get_hv_item_tag(self.hv_file, "CAPACITIES", "MAX_MSIX_TABLE_NUM")

    def check_item(self):
        hv_cfg_lib.hv_range_check(self.max_emu_mmio_regions, "CAPACITIES", "MAX_EMULATED_MMIO", hv_cfg_lib.RANGE_DB['EMULATED_MMIO_REGIONS'])
        hv_cfg_lib.hv_range_check(self.max_pt_irq_entries, "CAPACITIES", "MAX_PT_IRQ_ENTRIES", hv_cfg_lib.RANGE_DB['PT_IRQ_ENTRIES'])
        hv_cfg_lib.hv_range_check(self.max_ioapic_num, "CAPACITIES", "MAX_IOAPIC_NUM", hv_cfg_lib.RANGE_DB['IOAPIC_NUM'])
        hv_cfg_lib.hv_range_check(self.max_ioapic_lines, "CAPACITIES", "MAX_IOAPIC_LINES", hv_cfg_lib.RANGE_DB['IOAPIC_LINES'])
        hv_cfg_lib.ir_entries_check(self.max_ir_entries, "CAPACITIES", "MAX_IR_ENTRIES")
        hv_cfg_lib.hv_size_check(self.iommu_bus_num, "CAPACITIES", "IOMMU_BUS_NUM")
        hv_cfg_lib.hv_range_check(self.max_pci_dev_num, "CAPACITIES", "MAX_PCI_DEV_NUM", hv_cfg_lib.RANGE_DB['PCI_DEV_NUM'])
        hv_cfg_lib.max_msix_table_num_check(self.max_msix_table_num, "CAPACITIES", "MAX_MSIX_TABLE_NUM")


class MisCfg:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.gpu_sbdf = 0
        self.uefi_os_loader_name = ''

    def get_info(self):
        self.gpu_sbdf = common.get_hv_item_tag(self.hv_file, "MISC_CFG", "GPU_SBDF")
        self.uefi_os_loader_name = common.get_hv_item_tag(self.hv_file, "MISC_CFG", "UEFI_OS_LOADER_NAME")

    def check_item(self):
        hv_cfg_lib.hv_size_check(self.gpu_sbdf, "MISC_CFG", "GPU_SBDF")
        hv_cfg_lib.uefi_load_name_check(self.uefi_os_loader_name, "MISC_CFG", "UEFI_OS_LOADER_NAME")


class Features:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.reloc = ''
        self.multiboot2 = ''
        self.rdt_enabled = ''
        self.cdp_enabled = ''
        self.cat_max_mask = []
        self.mba_delay = []
        self.scheduler = ''
        self.hyperv_enabled = ''
        self.iommu_enforce_snp = ''
        self.acpi_parse_enabled = ''
        self.l1d_flush_vmentry_enabled = ''
        self.mce_on_psc_workaround_disabled = ''

    def get_info(self):
        self.multiboot2 = common.get_hv_item_tag(self.hv_file, "FEATURES", "MULTIBOOT2")
        self.rdt_enabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "RDT", "RDT_ENABLED")
        self.cdp_enabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "RDT", "CDP_ENABLED")
        self.cat_max_mask = common.get_hv_item_tag(self.hv_file, "FEATURES", "RDT", "CLOS_MASK")
        self.mba_delay = common.get_hv_item_tag(self.hv_file, "FEATURES", "RDT", "MBA_DELAY")
        self.scheduler = common.get_hv_item_tag(self.hv_file, "FEATURES", "SCHEDULER")
        self.reloc = common.get_hv_item_tag(self.hv_file, "FEATURES", "RELOC")
        self.hyperv_enabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "HYPERV_ENABLED")
        self.acpi_parse_enabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "ACPI_PARSE_ENABLED")
        self.l1d_flush_vmentry_enabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "L1D_VMENTRY_ENABLED")
        self.mce_on_psc_workaround_disabled = common.get_hv_item_tag(self.hv_file, "FEATURES", "MCE_ON_PSC_DISABLED")
        self.iommu_enforce_snp = common.get_hv_item_tag(self.hv_file, "FEATURES", "IOMMU_ENFORCE_SNP")

    def check_item(self):
        hv_cfg_lib.ny_support_check(self.multiboot2, "FEATURES", "MULTIBOOT2")
        hv_cfg_lib.ny_support_check(self.rdt_enabled, "FEATURES", "RDT", "RDT_ENABLED")
        hv_cfg_lib.ny_support_check(self.cdp_enabled, "FEATURES", "RDT", "CDP_ENABLED")
        hv_cfg_lib.cat_max_mask_check(self.cat_max_mask, "FEATURES", "RDT", "CLOS_MASK")
        hv_cfg_lib.mba_delay_check(self.mba_delay, "FEATURES", "RDT", "MBA_DELAY")
        hv_cfg_lib.scheduler_check(self.scheduler, "FEATURES", "SCHEDULER")
        hv_cfg_lib.ny_support_check(self.reloc, "FEATURES", "RELOC")
        hv_cfg_lib.ny_support_check(self.hyperv_enabled, "FEATURES", "HYPERV_ENABLED")
        hv_cfg_lib.ny_support_check(self.acpi_parse_enabled, "FEATURES", "ACPI_PARSE_ENABLED")
        hv_cfg_lib.ny_support_check(self.l1d_flush_vmentry_enabled, "FEATURES", "L1D_VMENTRY_ENABLED")
        hv_cfg_lib.ny_support_check(self.mce_on_psc_workaround_disabled, "FEATURES", "MCE_ON_PSC_DISABLED")
        hv_cfg_lib.ny_support_check(self.iommu_enforce_snp, "FEATURES", "IOMMU_ENFORCE_SNP")


class Memory:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.stack_size = 0
        self.low_ram_size = 0
        self.hv_ram_start = 0
        self.hv_ram_size = 0
        self.platform_ram_size = 0
        self.sos_ram_size = 0
        self.uos_ram_size = 0
        self.ivshmem_enable = 'n'
        self.ivshmem_region = []

    def get_info(self):
        self.stack_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "STACK_SIZE")
        self.low_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "LOW_RAM_SIZE")
        self.hv_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "HV_RAM_SIZE")
        self.hv_ram_start = common.get_hv_item_tag(self.hv_file, "MEMORY", "HV_RAM_START")
        self.platform_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "PLATFORM_RAM_SIZE")
        self.sos_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "SOS_RAM_SIZE")
        self.uos_ram_size = common.get_hv_item_tag(self.hv_file, "MEMORY", "UOS_RAM_SIZE")
        self.ivshmem_enable = common.get_hv_item_tag(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
        self.ivshmem_region = common.get_hv_item_tag(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")

    def check_item(self):
        hv_cfg_lib.hv_size_check(self.stack_size, "MEMORY", "STACK_SIZE")
        hv_cfg_lib.hv_size_check(self.low_ram_size, "MEMORY", "LOW_RAM_SIZE")
        hv_cfg_lib.hv_size_check(self.platform_ram_size, "MEMORY", "PLATFORM_RAM_SIZE")
        hv_cfg_lib.hv_size_check(self.sos_ram_size, "MEMORY", "SOS_RAM_SIZE")
        hv_cfg_lib.hv_size_check(self.uos_ram_size, "MEMORY", "UOS_RAM_SIZE")
        hv_cfg_lib.ny_support_check(self.ivshmem_enable, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")


class HvInfo:

    def __init__(self, hv_file):
        self.hv_file = hv_file
        self.mem = Memory(self.hv_file)
        self.cap = CapHv(self.hv_file)
        self.log = LogOpt(self.hv_file)
        self.mis = MisCfg(self.hv_file)
        self.features = Features(self.hv_file)

    def get_info(self):
        self.mem.get_info()
        self.log.get_info()
        self.cap.get_info()
        self.mis.get_info()
        self.features.get_info()

    def check_item(self):
        self.mem.check_item()
        self.log.check_item()
        self.cap.check_item()
        self.mis.check_item()
        self.features.check_item()
48.10396
141
0.702377
acedb04df4e80fa3f16d484705b244d062443281
3,566
py
Python
uds/segmentation/abstract_segmenter.py
mdabrowski1990/uds
1aee0c1de446ee3dd461706949504f2c218db1e8
[ "MIT" ]
18
2021-03-28T22:39:18.000Z
2022-02-13T21:50:37.000Z
uds/segmentation/abstract_segmenter.py
mdabrowski1990/uds
1aee0c1de446ee3dd461706949504f2c218db1e8
[ "MIT" ]
153
2021-02-09T09:27:05.000Z
2022-03-29T06:09:15.000Z
uds/segmentation/abstract_segmenter.py
mdabrowski1990/uds
1aee0c1de446ee3dd461706949504f2c218db1e8
[ "MIT" ]
1
2021-05-13T16:01:46.000Z
2021-05-13T16:01:46.000Z
"""Definition of API for segmentation and desegmentation strategies.""" __all__ = ["SegmentationError", "AbstractSegmenter"] from typing import Tuple, Type, Union, Any from abc import ABC, abstractmethod from uds.message import UdsMessage, UdsMessageRecord from uds.packet import AbstractUdsPacketContainer, PacketsContainersSequence, PacketsTuple class SegmentationError(ValueError): """UDS segmentation or desegmentation process cannot be completed due to input data inconsistency.""" class AbstractSegmenter(ABC): """ Abstract definition of a segmenter class. Segmenter classes defines UDS segmentation and desegmentation duties. They contain helper methods that are essential for successful :ref:`segmentation <knowledge-base-message-segmentation>` and :ref:`desegmentation <knowledge-base-packets-desegmentation>` execution. .. note:: Each concrete segmenter class handles exactly one bus. """ @property @abstractmethod def supported_packet_classes(self) -> Tuple[Type[AbstractUdsPacketContainer], ...]: """Classes that define packet objects supported by this segmenter.""" def is_supported_packet(self, value: Any) -> bool: """ Check if the argument value is a packet object of a supported type. :param value: Value to check. :return: True if provided value is an object of a supported packet type, False otherwise. """ return isinstance(value, self.supported_packet_classes) # type: ignore def is_supported_packets_sequence(self, value: Any) -> bool: """ Check if the argument value is a packet sequence of a supported type. :param value: Value to check. :return: True if provided value is a packet sequence of a supported type, False otherwise. """ if not isinstance(value, (list, tuple)): # not a sequence return False if not all(self.is_supported_packet(element) for element in value): # at least one element is not a packet of a supported type return False # check if all packets are the same type return len({type(element) for element in value}) == 1 @abstractmethod def is_complete_packets_sequence(self, packets: PacketsContainersSequence) -> bool: """ Check whether provided packets are full sequence of packets that form exactly one diagnostic message. :param packets: Packets sequence to check. :return: True if the packets form exactly one diagnostic message. False if there are missing, additional or inconsistent (e.g. two packets that initiate a message) packets. """ @abstractmethod def desegmentation(self, packets: PacketsContainersSequence) -> Union[UdsMessage, UdsMessageRecord]: """ Perform desegmentation of UDS packets. :param packets: UDS packets to desegment into UDS message. :raise SegmentationError: Provided packets are not a complete packet sequence that form a diagnostic message. :return: A diagnostic message that is an outcome of UDS packets desegmentation. """ @abstractmethod def segmentation(self, message: UdsMessage) -> PacketsTuple: """ Perform segmentation of a diagnostic message. :param message: UDS message to divide into UDS packets. :raise SegmentationError: Provided diagnostic message cannot be segmented. :return: UDS packets that are an outcome of UDS message segmentation. """
37.93617
118
0.699944
acedb153c7dffb5b8c04d2577fcc0073236486e6
1,111
py
Python
pycu/driver/core/stream.py
uchytilc/pycu
763060c969c6c246834c732e5fd44631652decab
[ "MIT" ]
null
null
null
pycu/driver/core/stream.py
uchytilc/pycu
763060c969c6c246834c732e5fd44631652decab
[ "MIT" ]
null
null
null
pycu/driver/core/stream.py
uchytilc/pycu
763060c969c6c246834c732e5fd44631652decab
[ "MIT" ]
null
null
null
from pycu.driver import stream_create, stream_destroy, stream_synchronize, stream_query

import weakref


class Stream:
    def __init__(self, *, handle = None, auto_free = True):
        if handle is None:
            handle = stream_create()
        if auto_free:
            weakref.finalize(self, stream_destroy, handle)
        self.handle = handle

    def __repr__(self):
        return f"Stream() <{int(self)}>"

    def __int__(self):
        return self.handle.value

    def __index__(self):
        return int(self)

    def synchronize(self):
        stream_synchronize(self.handle)

    def get_attribute(self):
        # stream_get_attribute(stream, attr)
        pass

    def set_attributes(self):
        # stream_set_attributes(stream, attr, value)
        pass

    def query(self):
        return stream_query(self.handle)

    # def flags(self):
    #     return stream_get_flags(stream)

    # def add_callback(self, callback, arg, flags = 0):
    #     data = (self, callback, arg)
    #     _py_incref(data)
    #     stream_add_callback(self.handle, stream_callback, data, flags)

    # def get_ctx(self):
    #     #import context manager
    #     ctx = get_context(stream_get_ctx(self.handle))
    #     return ctx


def stream():
    return Stream()
21.365385
87
0.721872
acedb154c2697ac0e8223416c35d8a74c1aa34d8
37,134
py
Python
nuitka/codegen/ConstantCodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-04-13T18:56:02.000Z
2020-04-13T18:56:02.000Z
nuitka/codegen/ConstantCodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-07-11T17:53:56.000Z
2020-07-11T17:53:56.000Z
nuitka/codegen/ConstantCodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
null
null
null
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Low level constant code generation. This deals with constants, there creation, there access, and some checks about them. Even mutable constants should not change during the course of the program. There are shared constants, which are created for multiple modules to use, you can think of them as globals. And there are module local constants, which are for a single module only. """ import ctypes import marshal import re import struct import sys from nuitka import Options from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin iterItems, long, unicode, xrange, ) from nuitka.Builtins import builtin_named_values, builtin_named_values_list from nuitka.Constants import NoneType, compareConstants, getConstantWeight, isMutable from nuitka.PythonVersions import python_version from nuitka.Tracing import codegen_missing, general from nuitka.Version import getNuitkaVersion from .BlobCodes import StreamData from .Emission import SourceCodeCollector from .ErrorCodes import getReleaseCode from .Indentation import indented from .templates.CodeTemplatesConstants import template_constants_reading def generateConstantReferenceCode(to_name, expression, emit, context): """ Assign the constant behind the expression to to_name.""" getConstantAccess( to_name=to_name, constant=expression.getConstant(), emit=emit, context=context ) def generateConstantNoneReferenceCode(to_name, expression, emit, context): """ Assign 'None' to to_name.""" # No context or other knowledge needed, pylint: disable=unused-argument if to_name.c_type == "nuitka_bool": emit("%s = NUITKA_BOOL_FALSE;" % to_name) else: emit("%s = Py_None;" % to_name) def generateConstantTrueReferenceCode(to_name, expression, emit, context): """ Assign 'True' to to_name.""" # No context or other knowledge needed, pylint: disable=unused-argument if to_name.c_type == "nuitka_bool": emit("%s = NUITKA_BOOL_TRUE;" % to_name) else: emit("%s = Py_True;" % to_name) def generateConstantFalseReferenceCode(to_name, expression, emit, context): """ Assign 'False' to to_name.""" # No context or other knowledge needed, pylint: disable=unused-argument if to_name.c_type == "nuitka_bool": emit("%s = NUITKA_BOOL_FALSE;" % to_name) else: emit("%s = Py_False;" % to_name) def generateConstantEllipsisReferenceCode(to_name, expression, emit, context): """ Assign 'Ellipsis' to to_name.""" # No context or other knowledge needed, pylint: disable=unused-argument if to_name.c_type == "nuitka_bool": emit("%s = NUITKA_BOOL_FALSE;" % to_name) else: emit("%s = Py_Ellipsis;" % to_name) # One global stream of constant information. In the future it might make # sense to have per module ones, for better locality of indexes within it, # but we don't do this yet. 
stream_data = StreamData() # TODO: The determination of this should already happen in Building or in a # helper not during code generation. _match_attribute_names = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") def _isAttributeName(value): # TODO: The exception is to make sure we intern the ".0" argument name # used for generator expressions, iterator value. return _match_attribute_names.match(value) or value == ".0" sizeof_long = ctypes.sizeof(ctypes.c_long) max_unsigned_long = 2 ** (sizeof_long * 8) - 1 # The gcc gives a warning for -2**sizeof_long*8-1, which is still an "int", but # seems to not work (without warning) as literal, so avoid it. min_signed_long = -(2 ** (sizeof_long * 8 - 1) - 1) done = set() def _getConstantInitValueCode(constant_value, constant_type): """ Return code, if possible, to create a constant. It's only used for module local constants, like error messages, and provides no caching of the values. When it returns "None", it is in error. """ # This function is a case driven by returns, pylint: disable=too-many-return-statements if constant_type is unicode: # Python3: Strings that can be encoded as UTF-8 are done more or less # directly. When they cannot be expressed as UTF-8, that is rare not we # can indeed use pickling. try: encoded = constant_value.encode("utf-8") if str is bytes: return "UNSTREAM_UNICODE(%s)" % (stream_data.getStreamDataCode(encoded)) else: return "UNSTREAM_STRING(%s, %d, %d)" % ( stream_data.getStreamDataCode(encoded, fixed_size=True), len(constant_value), 1 if _isAttributeName(constant_value) else 0, ) except UnicodeEncodeError: # TODO: try and use "surrogateescape" for this return None elif constant_type is str: assert str is bytes if len(constant_value) == 1: return "UNSTREAM_CHAR(%d, %d)" % ( ord(constant_value[0]), 1 if _isAttributeName(constant_value) else 0, ) else: return "UNSTREAM_STRING(%s, %d)" % ( stream_data.getStreamDataCode(constant_value), 1 if _isAttributeName(constant_value) else 0, ) elif constant_type is bytes: assert str is not bytes return "UNSTREAM_BYTES(%s)" % (stream_data.getStreamDataCode(constant_value)) else: return None def decideMarshal(constant_value): """ Decide of a constant can be created using "marshal" module methods. This is not the case for everything. A prominent exception is types, they are constants, but the "marshal" module refuses to work with them. """ # Many cases to deal with, pylint: disable=too-many-return-statements constant_type = type(constant_value) if constant_type is type: # Types cannot be marshaled, there is no choice about it. return False elif constant_type is dict: # Look at all the keys an values, if one of it cannot be marshaled, # or should not, that is it. for key, value in iterItems(constant_value): if not decideMarshal(key): return False if not decideMarshal(value): return False elif constant_type in (tuple, list, set, frozenset): for element_value in constant_value: if not decideMarshal(element_value): return False elif constant_type is xrange: return False elif constant_type is slice: return False return True def isMarshalConstant(constant_value): """ Decide if we want to use marshal to create a constant. The reason we do this, is because creating dictionaries with 700 elements creates a lot of C code, while gaining usually no performance at all. The MSVC compiler is especially notorious about hanging like forever with this active, due to its optimizer not scaling. Therefore we use a constant "weight" (how expensive it is), and apply that to decide. 
If marshal is not possible, or constant "weight" is too large, we don't do it. Also, for some constants, marshal can fail, and return other values. Check that too. In that case, we have to create it. """ if not decideMarshal(constant_value): return False if getConstantWeight(constant_value) < 20: return False try: marshal_value = marshal.dumps(constant_value) except ValueError: if Options.is_debug: codegen_missing.warning("Failed to marshal constant %r." % constant_value) return False restored = marshal.loads(marshal_value) r = compareConstants(constant_value, restored) if not r: pass # TODO: Potentially warn about these, where that is not the case. return r def getMarshalCode(constant_identifier, constant_value, emit): """ Force the marshal of a value. """ marshal_value = marshal.dumps(constant_value) restored = marshal.loads(marshal_value) assert compareConstants(constant_value, restored) emit( "%s = PyMarshal_ReadObjectFromString((char *)%s);" % (constant_identifier, stream_data.getStreamDataCode(marshal_value)) ) def attemptToMarshal(constant_identifier, constant_value, emit): """ Try and marshal a value, if so decided. Indicate with return value. See above for why marshal is only used in problematic cases. """ if not isMarshalConstant(constant_value): return False marshal_value = marshal.dumps(constant_value) restored = marshal.loads(marshal_value) # TODO: The check in isMarshalConstant is currently preventing this from # happening. if not compareConstants(constant_value, restored): general.warning("Problem with marshal of constant %r", constant_value) return False emit( "%s = PyMarshal_ReadObjectFromString((char *)%s);" % (constant_identifier, stream_data.getStreamDataCode(marshal_value)) ) return True def _addConstantInitCode( context, emit, check, constant_type, constant_value, constant_identifier, module_level, ): """ Emit code for a specific constant to be prepared during init. This may be module or global init. Code makes sure that nested constants belong into the same scope. """ # Got a couple of values to dodge, pylint: disable=too-many-return-statements if constant_value is None: return elif constant_value is False: return elif constant_value is True: return elif constant_value is Ellipsis: return elif constant_value is NotImplemented: return elif type(constant_value) is type: return elif constant_identifier in done: # Do not repeat ourselves. return if Options.shallTraceExecution(): emit("""NUITKA_PRINT_TRACE("Creating constant: %s");""" % constant_identifier) # Then it's a real named constant not yet created. __addConstantInitCode( context, emit, check, constant_type, constant_value, constant_identifier, module_level, ) # In debug mode, lets check if the constants somehow change behind our # back, add those values too. if Options.isDebug(): emit( """\ hash_%(constant_identifier)s = DEEP_HASH(%(constant_identifier)s);""" % {"constant_identifier": constant_identifier} ) check( """\ CHECK_OBJECT(%(constant_identifier)s); assert(hash_%(constant_identifier)s == DEEP_HASH(%(constant_identifier)s));""" % {"constant_identifier": constant_identifier} ) def __addConstantInitCode( context, emit, check, constant_type, constant_value, constant_identifier, module_level, ): """ Emit code for a specific constant to be prepared during init. This may be module or global init. Code makes sure that nested constants belong into the same scope. """ # This has many cases, that all return, and do a lot. 
# pylint: disable=too-many-branches,too-many-locals,too-many-return-statements,too-many-statements # For the module level, we only mean to create constants that are used only # inside of it. For the global level, it must must be single use. if module_level: if context.global_context.getConstantUseCount(constant_identifier) != 1: return else: if context.getConstantUseCount(constant_identifier) == 1: return # Adding it to "done". We cannot have recursive constants, so this is OK # to be done now. done.add(constant_identifier) # Use shortest code for ints and longs. if constant_type is long: # See above, same for long values. Note: These are of course not # existent with Python3 which would have covered it before. if 0 <= constant_value <= max_unsigned_long: emit( "%s = PyLong_FromUnsignedLong(%sul);" % (constant_identifier, constant_value) ) return elif 0 > constant_value >= min_signed_long: emit("%s = PyLong_FromLong(%sl);" % (constant_identifier, constant_value)) return elif constant_value == min_signed_long - 1: # There are compilers out there, that give warnings for the literal # MININT when used. We work around that warning here. emit( """\ %s = PyLong_FromLong(%sl); // To be corrected with -1 in-place next lines. CHECK_OBJECT(const_int_pos_1); %s = PyNumber_InPlaceSubtract(%s, PyLong_FromLong(1));""" % ( constant_identifier, min_signed_long, constant_identifier, constant_identifier, ) ) return else: getMarshalCode( constant_identifier=constant_identifier, constant_value=constant_value, emit=emit, ) return elif constant_type is int: if constant_value >= min_signed_long: emit("%s = PyInt_FromLong(%sl);" % (constant_identifier, constant_value)) return else: # There are compilers out there, that give warnings for the literal # MININT when used. We work around that warning here. assert constant_value == min_signed_long - 1 emit( """\ %s = PyInt_FromLong(%sl); // To be corrected in next line. %s = PyNumber_InPlaceSubtract(%s, PyInt_FromLong(1));""" % ( constant_identifier, min_signed_long, constant_identifier, constant_identifier, ) ) return if constant_type is unicode: try: encoded = constant_value.encode("utf-8") if str is bytes: emit( "%s = UNSTREAM_UNICODE(%s);" % (constant_identifier, stream_data.getStreamDataCode(encoded)) ) else: if str is not bytes and len(constant_value) == len(encoded): emit( "%s = UNSTREAM_STRING_ASCII(%s, %d);" % ( constant_identifier, stream_data.getStreamDataCode(encoded), 1 if _isAttributeName(constant_value) else 0, ) ) else: emit( "%s = UNSTREAM_STRING(%s, %d);" % ( constant_identifier, stream_data.getStreamDataCode(encoded), 1 if _isAttributeName(constant_value) else 0, ) ) return except UnicodeEncodeError: getMarshalCode( constant_identifier=constant_identifier, constant_value=constant_value, emit=emit, ) return elif constant_type is str: # Python3: Strings that can be encoded as UTF-8 are done more or less # directly. When they cannot be expressed as UTF-8, that is rare not we # can indeed use pickling. assert str is bytes if len(constant_value) == 1: emit( "%s = UNSTREAM_CHAR(%d, %d);" % ( constant_identifier, ord(constant_value[0]), 1 if _isAttributeName(constant_value) else 0, ) ) else: emit( "%s = UNSTREAM_STRING(%s, %d);" % ( constant_identifier, stream_data.getStreamDataCode(constant_value), 1 if _isAttributeName(constant_value) else 0, ) ) return elif constant_type is bytes: # Python3 only, for Python2, bytes do not happen. 
assert str is not bytes emit( "%s = UNSTREAM_BYTES(%s);" % (constant_identifier, stream_data.getStreamDataCode(constant_value)) ) return if constant_type is float: emit( "%s = UNSTREAM_FLOAT(%s);" % ( constant_identifier, stream_data.getStreamDataCode( value=struct.pack("<d", constant_value), fixed_size=True ), ) ) return if constant_type is dict: # Not all dictionaries can or should be marshaled. For small ones, # or ones with strange values, like "{1:type}", we have to do it. if attemptToMarshal(constant_identifier, constant_value, emit): return emit( "%s = _PyDict_NewPresized( %d );" % (constant_identifier, len(constant_value)) ) for key, value in iterItems(constant_value): key_name = context.getConstantCode(key) _addConstantInitCode( emit=emit, check=check, constant_type=type(key), constant_value=key, constant_identifier=key_name, module_level=module_level, context=context, ) value_name = context.getConstantCode(value) _addConstantInitCode( emit=emit, check=check, constant_type=type(value), constant_value=value, constant_identifier=value_name, module_level=module_level, context=context, ) # TODO: Error checking for debug. emit( "PyDict_SetItem(%s, %s, %s);" % (constant_identifier, key_name, value_name) ) emit( "assert(PyDict_Size(%s) == %d);" % (constant_identifier, len(constant_value)) ) return if constant_type is tuple: # Not all tuples can or should be marshaled. For small ones, # or ones with strange values, like "(type,)", we have to do it. if attemptToMarshal(constant_identifier, constant_value, emit): return emit("%s = PyTuple_New(%d);" % (constant_identifier, len(constant_value))) for count, element_value in enumerate(constant_value): element_name = context.getConstantCode(constant=element_value) _addConstantInitCode( emit=emit, check=check, constant_type=type(element_value), constant_value=element_value, constant_identifier=context.getConstantCode(constant=element_value), module_level=module_level, context=context, ) # Do not take references, these won't be deleted ever. emit( "PyTuple_SET_ITEM(%s, %d, %s); Py_INCREF(%s);" % (constant_identifier, count, element_name, element_name) ) return if constant_type is list: # Not all lists can or should be marshaled. For small ones, # or ones with strange values, like "[type]", we have to do it. if attemptToMarshal(constant_identifier, constant_value, emit): return emit("%s = PyList_New(%d);" % (constant_identifier, len(constant_value))) for count, element_value in enumerate(constant_value): element_name = context.getConstantCode(constant=element_value) _addConstantInitCode( emit=emit, check=check, constant_type=type(element_value), constant_value=element_value, constant_identifier=element_name, module_level=module_level, context=context, ) # Do not take references, these won't be deleted ever. emit( "PyList_SET_ITEM(%s, %d, %s); Py_INCREF(%s);" % (constant_identifier, count, element_name, element_name) ) return if constant_type is set or constant_type is frozenset: # Not all sets can or should be marshaled. For small ones, # or ones with strange values, like "{type}", we have to do it. if attemptToMarshal(constant_identifier, constant_value, emit): return # Special handling for empty frozensets. if not constant_value and constant_type is frozenset: emit( "%s = PyObject_CallFunction((PyObject*)&PyFrozenSet_Type, NULL);" % (constant_identifier,) ) return # TODO: Hinting size is really not possible? 
emit( "%s = %s(NULL);" % ( constant_identifier, "PySet_New" if constant_type is set else "PyFrozenSet_New", ) ) for element_value in constant_value: element_name = context.getConstantCode(element_value) _addConstantInitCode( emit=emit, check=check, constant_type=type(element_value), constant_value=element_value, constant_identifier=element_name, module_level=module_level, context=context, ) emit("PySet_Add(%s, %s);" % (constant_identifier, element_name)) emit( "assert(PySet_Size(%s) == %d);" % (constant_identifier, len(constant_value)) ) return if constant_type is slice: slice1_name = context.getConstantCode(constant_value.start) _addConstantInitCode( emit=emit, check=check, constant_type=type(constant_value.start), constant_value=constant_value.start, constant_identifier=slice1_name, module_level=module_level, context=context, ) slice2_name = context.getConstantCode(constant_value.stop) _addConstantInitCode( emit=emit, check=check, constant_type=type(constant_value.stop), constant_value=constant_value.stop, constant_identifier=slice2_name, module_level=module_level, context=context, ) slice3_name = context.getConstantCode(constant_value.step) _addConstantInitCode( emit=emit, check=check, constant_type=type(constant_value.step), constant_value=constant_value.step, constant_identifier=slice3_name, module_level=module_level, context=context, ) emit( "%s = PySlice_New(%s, %s, %s);" % (constant_identifier, slice1_name, slice2_name, slice3_name) ) return if constant_type is xrange: # Strip const_xrange. assert constant_identifier.startswith("const_xrange_") # For Python2, xrange needs only long values to be created, so avoid objects. range_args = constant_identifier[13:].split("_") # Default start. if len(range_args) == 1: range_args.insert(0, "0") # Default step if len(range_args) < 3: range_args.append("1") # Negative values are encoded with "neg" prefix. 
range_args = [int(range_arg.replace("neg", "-")) for range_arg in range_args] if xrange is not range: emit( "%s = MAKE_XRANGE(%s, %s, %s);" % (constant_identifier, range_args[0], range_args[1], range_args[2]) ) else: range1_name = context.getConstantCode(range_args[0]) _addConstantInitCode( emit=emit, check=check, constant_type=type(range_args[0]), constant_value=range_args[0], constant_identifier=range1_name, module_level=module_level, context=context, ) range2_name = context.getConstantCode(range_args[1]) _addConstantInitCode( emit=emit, check=check, constant_type=type(range_args[1]), constant_value=range_args[1], constant_identifier=range2_name, module_level=module_level, context=context, ) range3_name = context.getConstantCode(range_args[2]) _addConstantInitCode( emit=emit, check=check, constant_type=type(range_args[2]), constant_value=range_args[2], constant_identifier=range3_name, module_level=module_level, context=context, ) emit( "%s = BUILTIN_XRANGE3(%s, %s, %s);" % (constant_identifier, range1_name, range2_name, range3_name) ) return if constant_type is bytearray: emit( "%s = UNSTREAM_BYTEARRAY(%s);" % ( constant_identifier, stream_data.getStreamDataCode(bytes(constant_value)), ) ) return if constant_type is complex: getMarshalCode( constant_identifier=constant_identifier, constant_value=constant_value, emit=emit, ) return if constant_value in builtin_named_values_list: builtin_name = builtin_named_values[constant_value] builtin_identifier = context.getConstantCode(builtin_name) _addConstantInitCode( emit=emit, check=check, constant_type=type(builtin_name), constant_value=builtin_name, constant_identifier=builtin_identifier, module_level=module_level, context=context, ) emit("%s = LOOKUP_BUILTIN(%s);" % (constant_identifier, builtin_identifier)) return # Must not reach this, if we did, it's in error, and we need to know. assert False, (type(constant_value), constant_value, constant_identifier) def getConstantsInitCode(context): emit = SourceCodeCollector() check = SourceCodeCollector() # Sort items by length and name, so we are deterministic and pretty. sorted_constants = sorted( iterItems(context.getConstants()), key=lambda k: (len(k[0]), k[0]) ) for constant_identifier, constant_value in sorted_constants: _addConstantInitCode( emit=emit, check=check, constant_type=type(constant_value), constant_value=constant_value, constant_identifier=constant_identifier, module_level=False, context=context, ) return emit.codes, check.codes def getConstantsDeclCode(context): statements = [] # Sort items by length and name, so we are deterministic and pretty. sorted_constants = sorted( iterItems(context.getConstants()), key=lambda k: (len(k[0]), k[0]) ) for constant_identifier, constant_value in sorted_constants: # Need not declare built-in types. if constant_value is None: continue if constant_value is False: continue if constant_value is True: continue if constant_value is Ellipsis: continue if constant_value is NotImplemented: continue if type(constant_value) is type: continue if context.getConstantUseCount(constant_identifier) != 1: statements.append("PyObject *%s;" % constant_identifier) if Options.isDebug(): statements.append("Py_hash_t hash_%s;" % constant_identifier) return statements def getConstantAccess(to_name, constant, emit, context): # Many cases, because for each type, we may copy or optimize by creating # empty. 
pylint: disable=too-many-branches,too-many-statements if to_name.c_type == "nuitka_bool" and Options.is_debug: codegen_missing.info("Missing optimization for constant to C bool.") if type(constant) is dict: if constant: for key, value in iterItems(constant): # key cannot be mutable. assert not isMutable(key) if isMutable(value): needs_deep = True break else: needs_deep = False if needs_deep: code = "DEEP_COPY(%s)" % context.getConstantCode(constant) else: code = "PyDict_Copy(%s)" % context.getConstantCode(constant) else: code = "PyDict_New()" ref_count = 1 elif type(constant) is set: if constant: code = "PySet_New(%s)" % context.getConstantCode(constant) else: code = "PySet_New(NULL)" ref_count = 1 elif type(constant) is list: if constant: for value in constant: if isMutable(value): needs_deep = True break else: needs_deep = False if needs_deep: code = "DEEP_COPY(%s)" % context.getConstantCode(constant) else: code = "LIST_COPY(%s)" % context.getConstantCode(constant) else: code = "PyList_New(0)" ref_count = 1 elif type(constant) is tuple: for value in constant: if isMutable(value): needs_deep = True break else: needs_deep = False if needs_deep: code = "DEEP_COPY(%s)" % context.getConstantCode(constant) ref_count = 1 else: code = context.getConstantCode(constant) ref_count = 0 elif type(constant) is bytearray: code = "BYTEARRAY_COPY(%s)" % context.getConstantCode(constant) ref_count = 1 else: code = context.getConstantCode(constant=constant) ref_count = 0 if to_name.c_type == "PyObject *": value_name = to_name else: value_name = context.allocateTempName("constant_value") emit("%s = %s;" % (value_name, code)) if to_name is not value_name: to_name.getCType().emitAssignConversionCode( to_name=to_name, value_name=value_name, needs_check=False, emit=emit, context=context, ) # Above is supposed to transfer ownership. if ref_count: getReleaseCode(value_name, emit, context) else: if ref_count: context.addCleanupTempName(value_name) def getModuleConstantCode(constant): assert type(constant) is str result = _getConstantInitValueCode( constant_value=constant, constant_type=type(constant) ) assert result is not None return result constant_counts = {} def getConstantInitCodes(module_context): decls = [] inits = SourceCodeCollector() checks = SourceCodeCollector() sorted_constants = sorted( module_context.getConstants(), key=lambda k: (len(k[0]), k[0]) ) global_context = module_context.global_context for constant_identifier in sorted_constants: if not constant_identifier.startswith("const_"): continue if global_context.getConstantUseCount(constant_identifier) == 1: qualifier = "static" constant_value = global_context.constants[constant_identifier] _addConstantInitCode( emit=inits, check=checks, constant_type=type(constant_value), constant_value=constant_value, constant_identifier=constant_identifier, module_level=True, context=module_context, ) else: qualifier = "extern" decls.append("%s PyObject *%s;" % (qualifier, constant_identifier)) if Options.isDebug(): decls.append("%s Py_hash_t hash_%s;" % (qualifier, constant_identifier)) return decls, inits.codes, checks.codes def allocateNestedConstants(module_context): # Lots of types to deal with. 
def considerForDeferral(constant_value): module_context.getConstantCode(constant_value) if isMarshalConstant(constant_value): return constant_type = type(constant_value) if constant_type in (tuple, list, set, frozenset): for element in constant_value: considerForDeferral(element) elif constant_type is dict: for key, value in iterItems(constant_value): considerForDeferral(key) considerForDeferral(value) elif constant_type is slice: considerForDeferral(constant_value.start) considerForDeferral(constant_value.step) considerForDeferral(constant_value.stop) elif constant_type is xrange: if xrange is range: # For Python2 ranges, we use C long values directly. considerForDeferral(constant_value.start) considerForDeferral(constant_value.step) considerForDeferral(constant_value.stop) elif constant_value in builtin_named_values_list: considerForDeferral(builtin_named_values[constant_value]) for constant_identifier in set(module_context.getConstants()): constant_value = module_context.global_context.constants[constant_identifier] constant_type = type(constant_value) if constant_type in (tuple, dict, list, set, frozenset, slice, xrange): considerForDeferral(constant_value) elif constant_type in (str, NoneType, int, long): pass elif constant_value in builtin_named_values_list: considerForDeferral(builtin_named_values[constant_value]) def getConstantsDefinitionCode(context): """ Create the code code "__constants.c" file. This needs to create code to make all global constants (used in more than one module) and create them. """ constant_inits, constant_checks = getConstantsInitCode(context=context) constant_declarations = getConstantsDeclCode(context=context) sys_executable = None sys_prefix = None sys_base_prefix = None sys_exec_prefix = None sys_base_exec_prefix = None if not Options.shallMakeModule(): sys_executable = context.getConstantCode(sys.executable) sys_prefix = context.getConstantCode(sys.prefix) sys_exec_prefix = context.getConstantCode(sys.exec_prefix) if python_version >= 300: sys_base_prefix = context.getConstantCode(sys.base_prefix) sys_base_exec_prefix = context.getConstantCode(sys.base_exec_prefix) major, minor, micro = getNuitkaVersion().split(".")[:3] if "rc" in micro: micro = micro[: micro.find("rc")] level = "candidate" else: level = "release" return template_constants_reading % { "constant_declarations": "\n".join(constant_declarations), "constant_inits": indented(constant_inits), "constant_checks": indented(constant_checks), "sys_executable": sys_executable, "sys_prefix": sys_prefix, "sys_base_prefix": sys_base_prefix, "sys_exec_prefix": sys_exec_prefix, "sys_base_exec_prefix": sys_base_exec_prefix, "nuitka_version_major": major, "nuitka_version_minor": minor, "nuitka_version_micro": micro, "nuitka_version_level": level, }
32.45979
102
0.60621
acedb25db5231f233a6cb27165ce64bac154136c
356
py
Python
Resources/read.py
ozkibr/opencv-course
099eedeb1730082c7bfc2721470a97baef09b9d6
[ "MIT" ]
null
null
null
Resources/read.py
ozkibr/opencv-course
099eedeb1730082c7bfc2721470a97baef09b9d6
[ "MIT" ]
null
null
null
Resources/read.py
ozkibr/opencv-course
099eedeb1730082c7bfc2721470a97baef09b9d6
[ "MIT" ]
null
null
null
from cv2 import cv2 as cv

# img = cv.imread('Photos/cat_large.jpg')
# cv.imshow('Cat', img)
# cv.waitKey(0)

capture = cv.VideoCapture('Videos/dog.mp4')

while True:
    isTrue, Frame = capture.read()
    cv.imshow('Video', Frame)
    if cv.waitKey(20) & 0XFF == ord('e'):
        break

capture.release()
cv.destroyAllWindows()

print(cv.waitKey(10))
16.181818
43
0.646067
acedb4023e017938e19edc6f8d85cb0ea5a8a768
1,447
py
Python
iroko/vocabularies/admin.py
tocororo/iroko
e1cf08bbd565178c2d60244719aad6d288b48363
[ "MIT" ]
null
null
null
iroko/vocabularies/admin.py
tocororo/iroko
e1cf08bbd565178c2d60244719aad6d288b48363
[ "MIT" ]
14
2021-02-02T22:47:27.000Z
2021-12-22T18:39:46.000Z
iroko/vocabularies/admin.py
tocororo/iroko
e1cf08bbd565178c2d60244719aad6d288b48363
[ "MIT" ]
null
null
null
# Copyright (c) 2021. Universidad de Pinar del Rio
# This file is part of SCEIBA (sceiba.cu).
# SCEIBA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#

"""Iroko Admin views."""

from flask_admin.contrib.sqla import ModelView

from .models import Term, Vocabulary


class VocabularyModelView(ModelView):
    """View for managing vocabularies."""

    # can_view_details = True
    list_all = ('id', 'identifier', 'human_name', 'description')
    column_list = list_all
    column_default_sort = ('id', True)
    column_filters = list_all
    form_columns = ('identifier', 'human_name', 'description')


class TermModelView(ModelView):
    """View for managing terms."""

    # can_view_details = True
    list_all = ('id', 'identifier', 'description', 'vocabulary', 'uuid')
    column_list = list_all
    column_default_sort = ('identifier', True)
    column_filters = ('id', 'uuid', 'identifier', 'vocabulary')
    # form_columns = ('name', 'description')
    form_columns = ('vocabulary', 'identifier', 'description', 'parent_id')


vocabularies_adminview = dict(
    modelview=VocabularyModelView,
    model=Vocabulary,
    name='Vocabularies',
    category='Iroko'
)

terms_adminview = dict(
    modelview=TermModelView,
    model=Term,
    name='Terms',
    category='Iroko'
)

__all__ = ('vocabularies_adminview', 'terms_adminview')
22.968254
75
0.684174
acedb4fdfbe19b04283f94989511932c78f431f7
5,564
py
Python
classy_vision/optim/param_scheduler/composite_scheduler.py
miguelvr/ClassyVision
38a59270e16fda83e160c5888b96c777cb78757b
[ "MIT" ]
null
null
null
classy_vision/optim/param_scheduler/composite_scheduler.py
miguelvr/ClassyVision
38a59270e16fda83e160c5888b96c777cb78757b
[ "MIT" ]
null
null
null
classy_vision/optim/param_scheduler/composite_scheduler.py
miguelvr/ClassyVision
38a59270e16fda83e160c5888b96c777cb78757b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import Enum, auto
from typing import Any, Dict, Sequence

from . import (
    ClassyParamScheduler,
    UpdateInterval,
    build_param_scheduler,
    register_param_scheduler,
)


@register_param_scheduler("composite")
class CompositeParamScheduler(ClassyParamScheduler):
    """
    Composite parameter scheduler composed of intermediate schedulers.
    Takes a list of schedulers and a list of lengths corresponding to
    percentage of training each scheduler should run for. Schedulers
    are run in order. All values in lengths should sum to 1.0.

    Each scheduler also has a corresponding interval scale. If interval
    scale is 'fixed', the intermidiate scheduler will be run without any rescaling
    of the time. If interval scale is 'rescaled', intermediate scheduler is
    run such that each scheduler will start and end at the same values as it
    would if it were the only scheduler. Default is 'rescaled' for all schedulers.

    Example:

        .. code-block:: python

          update_interval = "step"
          schedulers = [
            {"name": "constant", "value": 0.42},
            {"name": "cosine_decay", "start_lr": 0.42, "end_lr": 0.0001}
          ]
          interval_scaling = ['rescaled', 'rescaled'],
          lengths = [0.3, 0.7]

    The parameter value will be 0.42 for the first [0%, 30%) of steps,
    and then will cosine decay from 0.42 to 0.0001 for [30%, 100%) of training.
    """

    class IntervalScaling(Enum):
        RESCALED = auto()
        FIXED = auto()

    def __init__(
        self,
        schedulers: Sequence[ClassyParamScheduler],
        lengths: Sequence[float],
        update_interval: UpdateInterval,
        interval_scaling: Sequence[IntervalScaling],
    ):
        super().__init__()
        self.update_interval = update_interval
        self._lengths = lengths
        self._schedulers = schedulers
        self._interval_scaling = interval_scaling

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "CompositeParamScheduler":
        """Instantiates a CompositeParamScheduler from a configuration.

        Args:
            config: A configuration for a CompositeParamScheduler.
                See :func:`__init__` for parameters expected in the config.

        Returns:
            A CompositeParamScheduler instance.
        """
        assert (
            "schedulers" in config and "lengths" in config
        ), "Composite scheduler needs both a list of schedulers and lengths"
        assert len(config["schedulers"]) == len(
            config["lengths"]
        ), "Schedulers and lengths must be same length"
        assert (
            len(config["schedulers"]) > 0
        ), "There must be at least one scheduler in the composite scheduler"
        assert (
            abs(sum(config["lengths"]) - 1.0) < 1e-3
        ), "The sum of all values in lengths must be 1"
        if sum(config["lengths"]) != 1.0:
            config["lengths"][-1] = 1.0 - sum(config["lengths"][:-1])
        update_interval = UpdateInterval.STEP
        if "update_interval" in config:
            assert config["update_interval"] in {
                "step",
                "epoch",
            }, "Choices for update interval are 'step' or 'epoch'"
            update_interval = UpdateInterval[config["update_interval"].upper()]
        interval_scaling = []
        if "interval_scaling" in config:
            assert len(config["schedulers"]) == len(
                config["interval_scaling"]
            ), "Schedulers and interval scaling must be the same length"
            for interval_scale in config["interval_scaling"]:
                assert interval_scale in {
                    "fixed",
                    "rescaled",
                }, "Choices for interval scaline are 'fixed' or 'rescaled'"
                interval_scaling.append(cls.IntervalScaling[interval_scale.upper()])
        else:
            interval_scaling = [cls.IntervalScaling.RESCALED] * len(
                config["schedulers"]
            )
        if "num_epochs" in config:  # Propogate value to intermediate schedulers
            config["schedulers"] = [
                dict(schedule, **{"num_epochs": config["num_epochs"]})
                for schedule in config["schedulers"]
            ]
        return cls(
            schedulers=[
                build_param_scheduler(scheduler) for scheduler in config["schedulers"]
            ],
            lengths=config["lengths"],
            update_interval=update_interval,
            interval_scaling=interval_scaling,
        )

    def __call__(self, where: float):
        # Find scheduler corresponding to where
        i = 0
        running_total = self._lengths[i]
        while (where + self.WHERE_EPSILON) > running_total and i < len(
            self._schedulers
        ) - 1:
            i += 1
            running_total += self._lengths[i]
        scheduler = self._schedulers[i]
        scheduler_where = where
        interval_scale = self._interval_scaling[i]
        if interval_scale == self.IntervalScaling.RESCALED:
            # Calculate corresponding where % for scheduler
            scheduler_start = running_total - self._lengths[i]
            scheduler_where = (where - scheduler_start) / self._lengths[i]
        return scheduler(scheduler_where)
38.638889
86
0.612509
acedb6ad45e3ef71f976b739e3527d2a933e9e51
564
py
Python
app/__init__.py
glasses-png/News-API
668d19d61019b3ea0e3a76aaaa5ab95d0789f4ca
[ "MIT" ]
null
null
null
app/__init__.py
glasses-png/News-API
668d19d61019b3ea0e3a76aaaa5ab95d0789f4ca
[ "MIT" ]
null
null
null
app/__init__.py
glasses-png/News-API
668d19d61019b3ea0e3a76aaaa5ab95d0789f4ca
[ "MIT" ]
null
null
null
from flask import Flask from flask_bootstrap import Bootstrap from config import config_options,Config bootstrap = Bootstrap() def create_app(config_name): app = Flask(__name__) #create the app configurations app.config.from_object(config_options[config_name]) app.config.from_object(Config) # Initializing flask extensions bootstrap.init_app(app) #registering the blueprint from .main import main as main_blueprint app.register_blueprint(main_blueprint) #setting config from .requests import configure_request configure_request(app) return app
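# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): 'production' is an
# assumed key of config_options; substitute whichever keys your config module
# actually defines.
if __name__ == '__main__':
    app = create_app('production')
    app.run(debug=False)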
22.56
52
0.817376
acedb7b63f6b2bba8247ddfc0717cad42e387012
3,503
py
Python
algs/genetic_CNN/genetic/statusupdatetool.py
Beautyya/BenchENA
5f5491614fc2f00ca26dc29f35f44c334db4718c
[ "MIT" ]
null
null
null
algs/genetic_CNN/genetic/statusupdatetool.py
Beautyya/BenchENA
5f5491614fc2f00ca26dc29f35f44c334db4718c
[ "MIT" ]
null
null
null
algs/genetic_CNN/genetic/statusupdatetool.py
Beautyya/BenchENA
5f5491614fc2f00ca26dc29f35f44c334db4718c
[ "MIT" ]
null
null
null
import configparser import os import numpy as np from compute import Config_ini from train.utils import TrainConfig class StatusUpdateTool(object): @classmethod def clear_config(cls): config_file = os.path.join(os.path.dirname(__file__), 'global.ini') config = configparser.ConfigParser() config.read(config_file) config.write(open(config_file, 'w')) @classmethod def __write_ini_file(cls, section, key, value): config_file = os.path.join(os.path.dirname(__file__), 'global.ini') config = configparser.ConfigParser() config.read(config_file) config.set(section, key, value) config.write(open(config_file, 'w')) @classmethod def __read_ini_file(cls, section, key): config_file = os.path.join(os.path.dirname(__file__), 'global.ini') config = configparser.ConfigParser() config.read(config_file) return config.get(section, key) @classmethod def get_num_nodes(cls): rs = cls.__read_ini_file('settings', 'NUM_NODES') num_nodes = [] for i in rs.split(','): num_nodes.append(int(i)) return num_nodes @classmethod def get_num_class(cls): return TrainConfig.get_out_cls_num(Config_ini.dataset) @classmethod def get_input_size(cls): rs = TrainConfig.get_data_input_size(Config_ini.dataset) return rs[0] @classmethod def get_input_channel(cls): rs = TrainConfig.get_data_input_size(Config_ini.dataset) return rs[2] @classmethod def get_stages(cls): rs = cls.__read_ini_file('settings', 'STAGES') stages = [] for i in rs.split(','): stages.append(i) return stages @classmethod def get_mutation_prob(cls): rs = cls.__read_ini_file('settings', 'mutation_prob') return float(rs) @classmethod def get_crossover_prob(cls): rs = cls.__read_ini_file('settings', 'crossover_prob') return float(rs) @classmethod def get_init_params(cls): pop_size = Config_ini.pop_size max_gen = Config_ini.max_gen params = {} params['pop_size'] = pop_size params['max_gen'] = max_gen params['NUM_NODES'] = cls.get_num_nodes() params['STAGES'] = cls.get_stages() params['mutation_prob'] = cls.get_mutation_prob() params['crossover_prob'] = cls.get_crossover_prob() l, _, _ = cls.get_params() params['l'] = l return params @classmethod def get_params(cls): L = 0 BITS_INDICES, l_bpi = np.empty((0, 2), dtype=np.int32), 0 # to keep track of bits for each stage S for nn in cls.get_num_nodes(): t = nn * (nn - 1) BITS_INDICES = np.vstack([BITS_INDICES, [l_bpi, l_bpi + int(0.5 * t)]]) l_bpi += int(0.5 * t) L += t L = int(0.5 * L) return L, BITS_INDICES, l_bpi @classmethod def begin_evolution(cls): section = 'evolution_status' key = 'IS_RUNNING' cls.__write_ini_file(section, key, "1") @classmethod def end_evolution(cls): section = 'evolution_status' key = 'IS_RUNNING' cls.__write_ini_file(section, key, "0") @classmethod def is_evolution_running(cls): rs = cls.__read_ini_file('evolution_status', 'IS_RUNNING') if rs == '1': return True else: return False
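# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original module): a standalone rerun
# of the arithmetic in get_params(). Each stage with `nn` nodes needs
# nn*(nn-1)/2 connection bits, so the illustrative NUM_NODES value [3, 4, 5]
# (not read from global.ini) gives stage slices [0,3), [3,9), [9,19) and a
# total encoding length L = 19.
def _encoding_length_example():
    num_nodes = [3, 4, 5]
    L, l_bpi = 0, 0
    bits_indices = np.empty((0, 2), dtype=np.int32)
    for nn in num_nodes:
        t = nn * (nn - 1)
        bits_indices = np.vstack([bits_indices, [l_bpi, l_bpi + int(0.5 * t)]])
        l_bpi += int(0.5 * t)
        L += t
    print(int(0.5 * L))   # 19
    print(bits_indices)   # [[ 0  3] [ 3  9] [ 9 19]]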
29.686441
107
0.613189
acedb84f1a2d6f6836515395f7dfbb29ab20353c
4,314
py
Python
model_cyclegan_dropout.py
Gleiphir/AinuGAN
963dbca303ca13f053a25c9a4f07eb4eda614091
[ "MIT" ]
null
null
null
model_cyclegan_dropout.py
Gleiphir/AinuGAN
963dbca303ca13f053a25c9a4f07eb4eda614091
[ "MIT" ]
null
null
null
model_cyclegan_dropout.py
Gleiphir/AinuGAN
963dbca303ca13f053a25c9a4f07eb4eda614091
[ "MIT" ]
null
null
null
# Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):
    def __init__(self, dropout: float):
        super(Discriminator, self).__init__()

        self.main = nn.Sequential(
            nn.Conv2d(3, 64, (4, 4), stride=(2, 2), padding=(1, 1)),
            nn.LeakyReLU(0.2, inplace=True),
            # nn.Dropout2d(dropout, inplace=True),

            nn.Conv2d(64, 128, (4, 4), stride=(2, 2), padding=(1, 1)),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout2d(dropout),

            nn.Conv2d(128, 256, (4, 4), stride=(2, 2), padding=(1, 1)),
            nn.InstanceNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout2d(dropout),

            nn.Conv2d(256, 512, (4, 4), padding=(1, 1)),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout2d(dropout),

            nn.Conv2d(512, 1, (4, 4), padding=(1, 1)),
        )

    def forward(self, x):
        x = self.main(x)
        x = F.avg_pool2d(x, x.size()[2:])
        x = torch.flatten(x, 1)
        return x


z_dim = 128


class Generator(nn.Module):
    def __init__(self, dropout: float):
        super(Generator, self).__init__()
        # self.unfold = nn.Linear(z_dim, 64 * 64 * 3)
        self.main = nn.Sequential(
            # Initial convolution block
            nn.ReflectionPad2d(3),
            nn.Conv2d(3, 64, (7, 7)),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True),

            # Downsampling
            nn.Conv2d(64, 128, (3, 3), stride=(2, 2), padding=(1, 1)),
            nn.InstanceNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, (3, 3), stride=(2, 2), padding=(1, 1)),
            nn.InstanceNorm2d(256),
            nn.ReLU(inplace=True),

            # Residual blocks
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),
            ResidualBlock(256, dropout=dropout),

            # Upsampling
            nn.ConvTranspose2d(256, 128, (3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1)),
            nn.InstanceNorm2d(128),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(128, 64, (3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1)),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True),

            # Output layer
            nn.ReflectionPad2d(3),
            nn.Conv2d(64, 3, (7, 7)),
            nn.Tanh()
        )

    def forward(self, x):
        # x is an image batch (N, 3, H, W); the CycleGAN generator is fully
        # convolutional, so no unfolding of a latent vector is needed here.
        return self.main(x)


class ResidualBlock(nn.Module):
    def __init__(self, in_channels, dropout):
        super(ResidualBlock, self).__init__()

        self.res = nn.Sequential(nn.ReflectionPad2d(1),
                                 nn.Conv2d(in_channels, in_channels, (3, 3)),
                                 nn.InstanceNorm2d(in_channels),
                                 nn.ReLU(inplace=True),
                                 nn.ReflectionPad2d(1),
                                 nn.Conv2d(in_channels, in_channels, (3, 3)),
                                 nn.InstanceNorm2d(in_channels),
                                 nn.Dropout2d(dropout),
                                 )

    def forward(self, x):
        return x + self.res(x)
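# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original module): a quick shape check on
# random data. The 256x256 resolution is an assumption; any size divisible by
# 4 works with these stride-2 stages.
if __name__ == "__main__":
    img = torch.randn(1, 3, 256, 256)
    G, D = Generator(dropout=0.5), Discriminator(dropout=0.5)
    fake = G(img)       # (1, 3, 256, 256): image-to-image translation
    score = D(fake)     # (1, 1): pooled realness score
    print(fake.shape, score.shape)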
33.44186
99
0.541493
acedb8829727d5d4216f9439a7cd984d362a306a
2,944
py
Python
src/Python/pipe_to_mongo.py
charlesdungy/nyc-service-requests-pipeline
b7c7b350e34a4052baadafc146616b81bbcc7adb
[ "MIT" ]
null
null
null
src/Python/pipe_to_mongo.py
charlesdungy/nyc-service-requests-pipeline
b7c7b350e34a4052baadafc146616b81bbcc7adb
[ "MIT" ]
null
null
null
src/Python/pipe_to_mongo.py
charlesdungy/nyc-service-requests-pipeline
b7c7b350e34a4052baadafc146616b81bbcc7adb
[ "MIT" ]
null
null
null
from connect_to_db import ConnectToDB from pymongo import errors from pymongo.operations import ReplaceOne class PipeToMongo(ConnectToDB): def __init__(self, connection_type): """ """ super().__init__(connection_type) def insert_into_mongodb(self, response): """ """ result = None try: operations = self.create_operations_to_insert(response) result = self.execute_bulk_insert(operations) except errors.BulkWriteError as err: self.logger.log_result( 'batch op errors occurred', 'MongoDB', 'error' ) finally: return result def create_operations_to_insert(self, response): """ """ batched_operations = [] operations = [] for document in response: if ('closed_date' not in document): document['closed_date'] = '' try: operations.append( ReplaceOne( filter={'unique_key': document['unique_key']}, replacement=document, upsert=True ) ) except Exception as e: self.logger.log_result( f'errors occurred creating operations: {e}', 'MongoDB', 'error' ) if (len(operations) == 1000): batched_operations.append(operations) operations = [] if (len(operations) > 0): batched_operations.append(operations) return batched_operations def execute_bulk_insert(self, batched_operations): """ """ result = self.get_result_dict() for operations in batched_operations: try: bulk_result = self.mongo_collection.bulk_write( requests=operations, ordered=False ) result['inserted_count'] += bulk_result.inserted_count result['matched_count'] += bulk_result.matched_count result['upserted_count'] += bulk_result.upserted_count if (bulk_result.acknowledged is False): result['success'] = False result['batch_result'] += '0' else: result['batch_result'] += '1' except Exception as e: self.logger.log_result( f'error occurred bulk writing operations: {e}', 'MongoDB', 'error' ) return result def get_result_dict(self): """ """ return { 'inserted_count': 0, 'matched_count': 0, 'upserted_count': 0, 'success': True, 'batch_result': '' }
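# ---------------------------------------------------------------------------
# Hedged standalone sketch (not part of the original module): the core upsert
# pattern used above, reduced to plain pymongo. The connection URI, database
# and collection names are assumptions for illustration.
def _bulk_upsert_example():
    from pymongo import MongoClient
    collection = MongoClient("mongodb://localhost:27017")["nyc311"]["requests"]
    docs = [
        {"unique_key": "1", "closed_date": ""},
        {"unique_key": "2", "closed_date": "2021-01-01"},
    ]
    operations = [
        ReplaceOne({"unique_key": d["unique_key"]}, d, upsert=True) for d in docs
    ]
    result = collection.bulk_write(requests=operations, ordered=False)
    print(result.upserted_count, result.matched_count, result.modified_count)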
30.989474
70
0.495924
acedbb074e2a330c505a47cbbe9237b2faf29fb5
2,323
py
Python
mozc-nazoru/setup.py
ikeji/mozc-devices
59a3805b539ba02eaf68eac1f5664fac0912420d
[ "Apache-2.0" ]
1,002
2016-03-31T15:49:47.000Z
2022-03-31T14:53:54.000Z
mozc-nazoru/setup.py
ikeji/mozc-devices
59a3805b539ba02eaf68eac1f5664fac0912420d
[ "Apache-2.0" ]
15
2016-03-31T23:24:08.000Z
2021-12-23T04:52:42.000Z
mozc-nazoru/setup.py
ikeji/mozc-devices
59a3805b539ba02eaf68eac1f5664fac0912420d
[ "Apache-2.0" ]
122
2016-03-31T16:44:32.000Z
2022-03-26T10:05:25.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import codecs import os from setuptools import setup, find_packages def read_file(name): with codecs.open( os.path.join(os.path.dirname(__file__), name), 'r', 'utf-8') as f: return f.read().strip() setup( name='nazoru-input', version='0.1.2', author='Makoto Shimazu', author_email='shimazu@google.com', url='https://landing.google.com/tegaki', description='Package for Gboard Physical Handwriting Version', long_description=read_file('README.rst'), license='Apache', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Environment :: No Input/Output (Daemon)', 'Operating System :: OS Independent', 'Programming Language :: Python', 'License :: OSI Approved :: Apache Software License', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities', ], packages=find_packages('src'), package_dir={'': 'src'}, package_data={ 'nazoru': ['data/optimized_nazoru.pb'] }, scripts=[ 'bin/nazoru-input', 'bin/nazoru-training' ], # For installing the nazoru_input as a service of systemd. Please uncomment # the following |data_files| if you want to install nazoru.service. # data_files=[('/etc/systemd/system', ['data/nazoru.service'])], install_requires=[ 'cairocffi', 'pillow', 'tensorflow~=2.5.1', 'tf_slim~=1.1.0', 'enum34;python_version<"3.4"', 'pyserial', 'evdev;platform_system=="Linux"', 'wiringpi;platform_system=="Linux"' ] )
32.263889
79
0.650022
acedbb29a62ecc282de6cc26987998ef7ddb5f18
22,188
py
Python
ekman/seaice.py
uesleisutil/Ekman
fc6b0f3ef8dd41da17f1dfc253a5999444f3f00c
[ "MIT" ]
1
2021-07-24T23:42:34.000Z
2021-07-24T23:42:34.000Z
ekman/seaice.py
uesleisutil/Ekman
fc6b0f3ef8dd41da17f1dfc253a5999444f3f00c
[ "MIT" ]
null
null
null
ekman/seaice.py
uesleisutil/Ekman
fc6b0f3ef8dd41da17f1dfc253a5999444f3f00c
[ "MIT" ]
1
2021-01-07T19:57:20.000Z
2021-01-07T19:57:20.000Z
""" This file generates a new ROMS output file from scratch. It is netCDF4 CF-compliant. TODO: Find a way to loop over a list of strings to delete all this crappy infinite command lines. >( """ from netCDF4 import Dataset from matplotlib import path from progress.bar import IncrementalBar import numpy as np import time seaseaiceFillVal = 1.e+37 class seaice(object): def vars(seaiceOriDir,seaiceNewDir): """ Generates a new WRF output file from scratch. Parameters ---------- >>> seaseaiceBox = [lon_min, lon_max, lat_min, lat_max] >>> seaiceLevel = np.arange(min(seaiceLevel), max(seaiceLevel)+1) >>> seaiceTStep = np.arange(min(seaiceTStep), max(seaiceTStep)+1) """ # kwargs seaiceBox = kwargs.get('seaiceBox') seaiceTStep = kwargs.get('seaiceTStep') age = kwargs.get('age') aice = kwargs.get('aice') hice = kwargs.get('hice') vice = kwargs.get('vice') uice = kwargs.get('uice') snowthick = kwargs.get('snowthick') tisrf = kwargs.get('tisrf') iomflx = kwargs.get('iomflx') ti = kwargs.get('ti') # Original output file. iceRawFile = Dataset(iceOriDir, mode='r') iceNewFile = Dataset(iceNewDir, 'w', format='NETCDF4') iceNewFile.title = "Budgell Sea-ice output file" iceNewFile.description = "Created with Ekman Toolbox in " + time.ctime(time.time()) iceNewFile.link = "https://github.com/uesleisutil/Ekman" if seaiceBox is not None: def bbox2ij(lon,lat,seaiceBox=[-160., -155., 18., 23.]): """Return indices for i,j that will completely cover the specified bounding box. i0,i1,j0,j1 = bbox2ij(lon,lat,seaiceBox) lon,lat = 2D arrays that are the target of the subset seaiceBox = list containing the bounding box: [lon_min, lon_max, lat_min, lat_max] Example ------- >>> i0,i1,j0,j1 = bbox2ij(lon_rho,[-71, -63., 39., 46]) >>> h_subset = nc.variables['h'][j0:j1,i0:i1] """ seaiceBox=np.array(seaiceBox) mypath=np.array([seaiceBox[[0,1,1,0]],seaiceBox[[2,2,3,3]]]).T p = path.Path(mypath) points = np.vstack((lon.flatten(),lat.flatten())).T n,m = np.shape(lon) inside = p.contains_points(points).reshape((n,m)) ii,jj = np.meshgrid(range(m),range(n)) return min(ii[inside]),max(ii[inside]),min(jj[inside]),max(jj[inside]) lon_rho = iceRawFile.variables['lon_rho'][:,:] lat_rho = iceRawFile.variables['lat_rho'][:,:] i0,i1,j0,j1 = bbox2ij(lon_rho,lat_rho,seaiceBox) lon_rho = iceRawFile.variables['lon_rho'][j0:j1, i0:i1] lat_rho = iceRawFile.variables['lat_rho'][j0:j1, i0:i1] iceNewFile.createDimension('eta_rho', len(lon_rho[:,0])) iceNewFile.createDimension('xi_rho', len(lon_rho[0,:])) print("Bounding box selected. New domain limits are: Longitude "+str(seaiceBox[0])+"/"+str(seaiceBox[1])+" and Latitude "+str(seaiceBox[2])+"/"+str(seaiceBox[3])+".") else: print("No bounding box selected: Using XLAT and XLONG variables from input file.") lon_rho = iceNewFile.variables['lon_rho'][:,:] lat_rho = iceNewFile.variables['lat_rho'][:,:] eta_rho = iceNewFile.dimensions['eta_rho'] xi_rho = iceNewFile.dimensions['xi_rho'] iceNewFile.createDimension('eta_rho', len(eta_rho)) iceNewFile.createDimension('xi_rho', len(xi_rho)) iceNewLon = iceNewFile.createVariable('lon_rho', 'd', ('eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewLon.long_name = 'Longitude on RHO-points' iceNewLon.units = 'degree_east' iceNewLon.standard_name = 'longitude' iceNewLon[:,:] = lon_rho iceNewLat = iceNewFile.createVariable('lat_rho', 'd', ('eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewLat.long_name = 'Latitude on RHO-points' iceNewLat.units = 'degree_north' iceNewLat.standard_name = 'latitude' iceNewLat[:, :] = lat_rho # New Sea-ice output file. 
        iceNewFile.createDimension('ocean_time', 0)
        ice_time = iceRawFile.variables['ocean_time']
        iceNewOTdim = iceNewFile.createVariable('ocean_time', np.dtype('double').char, ('ocean_time'))
        iceNewOTdim.long_name = ice_time.units
        iceNewOTdim.units = ice_time.units

        s_rho = iceRawFile.dimensions['s_rho']
        s_w = iceRawFile.dimensions['s_w']

        if seaiceTStep is not None:
            ntimes = seaiceTStep
            print("Time-step selected: Working from time-step "+str(np.argmin(ntimes))+" to "+str(np.argmax(ntimes))+".")
        else:
            ntimes = iceRawFile.variables['ocean_time'][:]
            print("No time-step selected. Working with entire time-step.")

        # If Budgell Sea-Ice has been chosen.
        if age == True:
            print('Working on Budgell Sea-Ice Age.')
            bar = IncrementalBar(max=len(ntimes))
            for i in range(np.argmin(ntimes),len(ntimes),1):
                if seaiceBox == True:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['ageice'][ntimes[0]+i,j0:j1, i0:i1]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('ageice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Sea-ice age'
                        iceNewVar.units = 'S'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['ageice'][ntimes[0]+i,j0:j1,i0:i1]
                        iceNewVar[i,:,:] = iceRawVar
                else:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['ageice'][ntimes[0]+i,:,:]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('ageice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Sea-ice age'
                        iceNewVar.units = 'S'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['ageice'][ntimes[0]+i,:,:]
                        iceNewVar[i,:,:] = iceRawVar
                bar.next()
            bar.finish()

        # If Budgell Sea-Ice Fraction of Cell Covered by Ice has been chosen.
        if aice == True:
            print('Working on Budgell Sea-Ice Fraction of Cell Covered by Ice.')
            bar = IncrementalBar(max=len(ntimes))
            for i in range(np.argmin(ntimes),len(ntimes),1):
                if seaiceBox == True:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['aice'][ntimes[0]+i,j0:j1, i0:i1]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('aice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Fraction of Cell Covered by Ice'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['aice'][ntimes[0]+i,j0:j1,i0:i1]
                        iceNewVar[i,:,:] = iceRawVar
                else:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['aice'][ntimes[0]+i,:,:]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('aice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Fraction of Cell Covered by Ice'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['aice'][ntimes[0]+i,:,:]
                        iceNewVar[i,:,:] = iceRawVar
                bar.next()
            bar.finish()

        # If Budgell Sea-Ice Average Ice Thickness in Cell has been chosen.
if hice == True: print('Working on Budgell Sea-Ice Average Ice Thickness in Cell.') bar = IncrementalBar(max=len(ntimes)) for i in range(np.argmin(ntimes),len(ntimes),1): if seaiceBox == True: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['hice'][ntimes[0]+i,j0:j1, i0:i1] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('hice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Average Ice Thickness in Cell' iceNewVar.units = 'meters' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['hice'][ntimes[0]+i,j0:j1,i0:i1] iceNewVar[i,:,:] = iceRawVar else: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['hice'][ntimes[0]+i,:,:] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('hice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Average Ice Thickness in Cell' iceNewVar.units = 'meters' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['hice'][ntimes[0]+i,:,:] iceNewVar[i,:,:] = iceRawVar bar.next() bar.finish() # If Budgell Sea-Ice V-Velocity has been chosen. if vice == True: print('Working on Budgell Sea-Ice V-Velocity.') bar = IncrementalBar(max=len(ntimes)) for i in range(np.argmin(ntimes),len(ntimes),1): if seaiceBox == True: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['vice'][ntimes[0]+i,j0:j1, i0:i1] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('vice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'V-component of Ice Velocity' iceNewVar.units = 'm s-1' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['vice'][ntimes[0]+i,j0:j1,i0:i1] iceNewVar[i,:,:] = iceRawVar else: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['vice'][ntimes[0]+i,:,:] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('vice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'V-component of Ice Velocity' iceNewVar.units = 'm s-1' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['vice'][ntimes[0]+i,:,:] iceNewVar[i,:,:] = iceRawVar bar.next() bar.finish() # If Budgell Sea-Ice U-Velocity has been chosen. 
if uice == True: print('Working on Budgell Sea-Ice U-Velocity.') bar = IncrementalBar(max=len(ntimes)) for i in range(np.argmin(ntimes),len(ntimes),1): if seaiceBox == True: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['uice'][ntimes[0]+i,j0:j1, i0:i1] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('uice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'U-component of Ice Velocity' iceNewVar.units = 'm s-1' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['uice'][ntimes[0]+i,j0:j1,i0:i1] iceNewVar[i,:,:] = iceRawVar else: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['uice'][ntimes[0]+i,:,:] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('uice', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'U-component of Ice Velocity' iceNewVar.units = 'm s-1' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['uice'][ntimes[0]+i,:,:] iceNewVar[i,:,:] = iceRawVar bar.next() bar.finish() # If Budgell Sea-cover Thickness has been chosen. if snowthick == True: print('Working on Budgell Sea-cover Thickness.') bar = IncrementalBar(max=len(ntimes)) for i in range(np.argmin(ntimes),len(ntimes),1): if seaiceBox == True: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['snow_thick'][ntimes[0]+i,j0:j1, i0:i1] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('snow_thick', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Sea-cover Thickness' iceNewVar.units = 'meter' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['snow_thick'][ntimes[0]+i,j0:j1,i0:i1] iceNewVar[i,:,:] = iceRawVar else: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['snow_thick'][ntimes[0]+i,:,:] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('snow_thick', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Sea-cover Thickness' iceNewVar.units = 'meter' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['snow_thick'][ntimes[0]+i,:,:] iceNewVar[i,:,:] = iceRawVar bar.next() bar.finish() # If Budgell Sea-ice Surface Temperature has been chosen. 
        if tisrf == True:
            print('Working on Budgell Sea-ice Surface Temperature.')
            bar = IncrementalBar(max=len(ntimes))
            for i in range(np.argmin(ntimes),len(ntimes),1):
                if seaiceBox == True:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['tisrf'][ntimes[0]+i,j0:j1, i0:i1]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('tisrf', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Sea-Ice Surface Temperature'
                        iceNewVar.units = 'Degree Celsius'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['tisrf'][ntimes[0]+i,j0:j1,i0:i1]
                        iceNewVar[i,:,:] = iceRawVar
                else:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['tisrf'][ntimes[0]+i,:,:]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('tisrf', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Sea-Ice Surface Temperature'
                        iceNewVar.units = 'Degree Celsius'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['tisrf'][ntimes[0]+i,:,:]
                        iceNewVar[i,:,:] = iceRawVar
                bar.next()
            bar.finish()

        # If Budgell Ice-Ocean Mass Flux has been chosen.
        if iomflx == True:
            print('Working on Budgell Ice-Ocean Mass Flux.')
            bar = IncrementalBar(max=len(ntimes))
            for i in range(np.argmin(ntimes),len(ntimes),1):
                if seaiceBox == True:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['iomflx'][ntimes[0]+i,j0:j1, i0:i1]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('iomflx', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Ice-Ocean Mass Flux'
                        iceNewVar.units = 'm s-1'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['iomflx'][ntimes[0]+i,j0:j1,i0:i1]
                        iceNewVar[i,:,:] = iceRawVar
                else:
                    if i == np.argmin(ntimes):
                        iceRawVar = iceRawFile.variables['iomflx'][ntimes[0]+i,:,:]
                        iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
                        iceNewVar = iceNewFile.createVariable('iomflx', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal)
                        iceNewVar.long_name = 'Ice-Ocean Mass Flux'
                        iceNewVar.units = 'm s-1'
                        iceNewVar[i,:,:] = iceRawVar
                    else:
                        iceRawVar = iceRawFile.variables['iomflx'][ntimes[0]+i,:,:]
                        iceNewVar[i,:,:] = iceRawVar
                bar.next()
            bar.finish()

        # If Budgell Interior Ice Temperature has been chosen.
if ti == True: print('Working on Budgell Interior Ice Temperature.') bar = IncrementalBar(max=len(ntimes)) for i in range(np.argmin(ntimes),len(ntimes),1): if seaiceBox == True: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['ti'][ntimes[0]+i,j0:j1, i0:i1] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('ti', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Interior Ice Temperature' iceNewVar.units = 'Degree Celcius' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['ti'][ntimes[0]+i,j0:j1,i0:i1] iceNewVar[i,:,:] = iceRawVar else: if i == np.argmin(ntimes): iceRawVar = iceRawFile.variables['ti'][ntimes[0]+i,:,:] iceNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)]) iceNewVar = iceNewFile.createVariable('ti', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=seaiceFillVal) iceNewVar.long_name = 'Interior Ice Temperature' iceNewVar.units = 'Degree Celcius' iceNewVar[i,:,:] = iceRawVar else: iceRawVar = iceRawFile.variables['ti'][ntimes[0]+i,:,:] iceNewVar[i,:,:] = iceRawVar bar.next() bar.finish() iceNewFile.close()
57.481865
185
0.487966
acedbb63b98f140355353a68c424e165bfe67366
346
py
Python
app/api/router.py
tanimutomo/app-on-gpu
7a4ca5b962f249be1f0932bd96b2980c2ec9537c
[ "MIT" ]
1
2020-09-04T02:38:43.000Z
2020-09-04T02:38:43.000Z
app/api/router.py
tanimutomo/app-on-gpu
7a4ca5b962f249be1f0932bd96b2980c2ec9537c
[ "MIT" ]
null
null
null
app/api/router.py
tanimutomo/app-on-gpu
7a4ca5b962f249be1f0932bd96b2980c2ec9537c
[ "MIT" ]
null
null
null
from flask import request from api import app from api import controllers as con from api import service @app.before_first_request def init(): service.init() @app.route("/health") def health(): return con.health() @app.route("/image", methods=["POST"]) def image(): img_b64 = request.json["image"] return con.image(img_b64)
16.47619
38
0.699422
acedbb84278d0a7f9b2a439299d8f5ef61b9f2ea
4,733
py
Python
src/modlunky2/sprites/sprite_loaders.py
Gugubo/modlunky2
915a829f610b393bfe50e69d542d4d39c283fcad
[ "Apache-2.0" ]
null
null
null
src/modlunky2/sprites/sprite_loaders.py
Gugubo/modlunky2
915a829f610b393bfe50e69d542d4d39c283fcad
[ "Apache-2.0" ]
null
null
null
src/modlunky2/sprites/sprite_loaders.py
Gugubo/modlunky2
915a829f610b393bfe50e69d542d4d39c283fcad
[ "Apache-2.0" ]
null
null
null
from typing import Optional from modlunky2.constants import BASE_DIR from modlunky2.sprites.items import ItemSheet from modlunky2.sprites.coffins import CoffinSheet from modlunky2.sprites.journal_stickers import StickerSheet from modlunky2.sprites.journal_items import JournalItemSheet from modlunky2.sprites.journal_people import JournalPeopleSheet from modlunky2.sprites.journal_mons import JournalMonsterSheet from modlunky2.sprites.journal_mons_big import JournalBigMonsterSheet from modlunky2.sprites.journal_place import JournalPlaceSheet from modlunky2.sprites.journal_traps import JournalTrapSheet from modlunky2.sprites.character import ( CharacterBlackSheet, CharacterBlueSheet, CharacterCeruleanSheet, CharacterCinnabarSheet, CharacterCyanSheet, CharacterEggChildSheet, CharacterGoldSheet, CharacterGraySheet, CharacterGreenSheet, CharacterHiredHandSheet, CharacterIrisSheet, CharacterKhakiSheet, CharacterLemonSheet, CharacterLimeSheet, CharacterMagentaSheet, CharacterOliveSheet, CharacterOrangeSheet, CharacterPinkSheet, CharacterRedSheet, CharacterVioletSheet, CharacterWhiteSheet, CharacterYellowSheet, ) from modlunky2.sprites.monsters.mounts import Mounts from modlunky2.sprites.monsters.pets import Pets from modlunky2.sprites.monsters.ghost import Ghost from modlunky2.sprites.monsters.basic import ( Basic1, Basic2, Basic3, Monsters1, Monsters2, Monsters3, ) from modlunky2.sprites.monsters.big import ( Big1, Big2, Big3, Big4, Big5, Big6, OsirisAndAlienQueen, OlmecAndMech, ) from modlunky2.sprites.tilecode_extras import ( ChainAndBlocksCeilingSheet, TilecodeExtras, TreasureVaultChestSheet, ) from modlunky2.sprites.menu_leader import MenuLeaderSheet from modlunky2.sprites.menu_basic import MenuBasicSheet, PetHeadsSheet from modlunky2.sprites.deco_sheet import CaveDecoSheet def get_all_sprite_loaders( entities_json: Optional[dict], textures_json: Optional[dict], base_path: str ): return [ ItemSheet(base_path), CoffinSheet(base_path), StickerSheet(base_path), JournalItemSheet(base_path), JournalPeopleSheet(base_path), JournalMonsterSheet(base_path), JournalBigMonsterSheet(base_path), JournalPlaceSheet(base_path), JournalTrapSheet(base_path), CharacterBlackSheet(base_path), CharacterLimeSheet(base_path), CharacterMagentaSheet(base_path), CharacterOliveSheet(base_path), CharacterOrangeSheet(base_path), CharacterPinkSheet(base_path), CharacterRedSheet(base_path), CharacterVioletSheet(base_path), CharacterWhiteSheet(base_path), CharacterYellowSheet(base_path), CharacterBlueSheet(base_path), CharacterCeruleanSheet(base_path), CharacterCinnabarSheet(base_path), CharacterCyanSheet(base_path), CharacterEggChildSheet(base_path), CharacterGoldSheet(base_path), CharacterGraySheet(base_path), CharacterGreenSheet(base_path), CharacterHiredHandSheet(base_path), CharacterIrisSheet(base_path), CharacterKhakiSheet(base_path), CharacterLemonSheet(base_path), Mounts(entities_json, textures_json, base_path), Pets(entities_json, textures_json, base_path), MenuLeaderSheet(base_path), MenuBasicSheet(base_path), PetHeadsSheet(BASE_DIR / "static"), Basic1(entities_json, textures_json, base_path), Basic2(entities_json, textures_json, base_path), Basic3(entities_json, textures_json, base_path), Monsters1(entities_json, textures_json, base_path), Monsters2(entities_json, textures_json, base_path), Monsters3(entities_json, textures_json, base_path), Big1(entities_json, textures_json, base_path), Big2(entities_json, textures_json, base_path), Big3(entities_json, textures_json, base_path), Big4(entities_json, textures_json, 
base_path), Big5(entities_json, textures_json, base_path), Big6(entities_json, textures_json, base_path), OsirisAndAlienQueen(entities_json, textures_json, base_path), OlmecAndMech(entities_json, textures_json, base_path), Ghost(entities_json, textures_json, base_path), CaveDecoSheet(base_path), # These uses the constant BASE_DIR as the base path as this # texture is bundled with the source rather than coming # from the extracted assets. TilecodeExtras(BASE_DIR), TreasureVaultChestSheet(BASE_DIR), ChainAndBlocksCeilingSheet(BASE_DIR), ]
35.856061
80
0.745193
acedbbe694b4d9e3c37ac9d901b90584a614684a
4,352
py
Python
feedzilla/migrations/0008_set_rawtags.py
feedzilla/feedzilla
cd9efe6ce1e44aeda741f0279cc3b36ea1ed2264
[ "BSD-3-Clause" ]
2
2015-05-25T11:34:45.000Z
2020-06-23T22:29:01.000Z
feedzilla/migrations/0008_set_rawtags.py
feedzilla/feedzilla
cd9efe6ce1e44aeda741f0279cc3b36ea1ed2264
[ "BSD-3-Clause" ]
null
null
null
feedzilla/migrations/0008_set_rawtags.py
feedzilla/feedzilla
cd9efe6ce1e44aeda741f0279cc3b36ea1ed2264
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): orm['feedzilla.Post'].objects.update(rawtags=models.F('tags')) def backwards(self, orm): "Write your backwards methods here." models = { u'feedzilla.feed': { 'Meta': {'object_name': 'Feed'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'active_post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'etag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'feed_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'site_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'skip_filters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'feedzilla.filtertag': { 'Meta': {'object_name': 'FilterTag'}, 'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'feedzilla.filterword': { 'Meta': {'object_name': 'FilterWord'}, 'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'feedzilla.post': { 'Meta': {'ordering': "['-created']", 'object_name': 'Post'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['feedzilla.Feed']"}), 'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.TextField', [], {}), 'rawtags': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'tags': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'feedzilla.request': { 'Meta': {'ordering': "['-created']", 'object_name': 'Request'}, 'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'feed_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) } } complete_apps = ['feedzilla'] symmetrical = True
62.171429
148
0.547794
acedbc25552b5c2092b60edba5b48198a956ed4e
4,697
py
Python
texttospeech/tests/unit/gapic/v1beta1/test_text_to_speech_client_v1beta1.py
theacodes/google-cloud-python
57dafcb78540e12c82f7ca0fc77d75edeb269390
[ "Apache-2.0" ]
1
2020-10-25T04:39:41.000Z
2020-10-25T04:39:41.000Z
texttospeech/tests/unit/gapic/v1beta1/test_text_to_speech_client_v1beta1.py
theacodes/google-cloud-python
57dafcb78540e12c82f7ca0fc77d75edeb269390
[ "Apache-2.0" ]
4
2018-11-13T22:15:36.000Z
2018-12-07T18:31:38.000Z
texttospeech/tests/unit/gapic/v1beta1/test_text_to_speech_client_v1beta1.py
theacodes/google-cloud-python
57dafcb78540e12c82f7ca0fc77d75edeb269390
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests.""" import mock import pytest from google.cloud import texttospeech_v1beta1 from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2 class MultiCallableStub(object): """Stub for the grpc.UnaryUnaryMultiCallable interface.""" def __init__(self, method, channel_stub): self.method = method self.channel_stub = channel_stub def __call__(self, request, timeout=None, metadata=None, credentials=None): self.channel_stub.requests.append((self.method, request)) response = None if self.channel_stub.responses: response = self.channel_stub.responses.pop() if isinstance(response, Exception): raise response if response: return response class ChannelStub(object): """Stub for the grpc.Channel interface.""" def __init__(self, responses=[]): self.responses = responses self.requests = [] def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) class CustomException(Exception): pass class TestTextToSpeechClient(object): def test_list_voices(self): # Setup Expected Response expected_response = {} expected_response = cloud_tts_pb2.ListVoicesResponse( **expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch('google.api_core.grpc_helpers.create_channel') with patch as create_channel: create_channel.return_value = channel client = texttospeech_v1beta1.TextToSpeechClient() response = client.list_voices() assert expected_response == response assert len(channel.requests) == 1 expected_request = cloud_tts_pb2.ListVoicesRequest() actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_voices_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch('google.api_core.grpc_helpers.create_channel') with patch as create_channel: create_channel.return_value = channel client = texttospeech_v1beta1.TextToSpeechClient() with pytest.raises(CustomException): client.list_voices() def test_synthesize_speech(self): # Setup Expected Response audio_content = b'16' expected_response = {'audio_content': audio_content} expected_response = cloud_tts_pb2.SynthesizeSpeechResponse( **expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) patch = mock.patch('google.api_core.grpc_helpers.create_channel') with patch as create_channel: create_channel.return_value = channel client = texttospeech_v1beta1.TextToSpeechClient() # Setup Request input_ = {} voice = {} audio_config = {} response = client.synthesize_speech(input_, voice, audio_config) assert expected_response == response assert len(channel.requests) == 1 expected_request = cloud_tts_pb2.SynthesizeSpeechRequest( input=input_, voice=voice, audio_config=audio_config) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_synthesize_speech_exception(self): # Mock the API response channel = 
ChannelStub(responses=[CustomException()]) patch = mock.patch('google.api_core.grpc_helpers.create_channel') with patch as create_channel: create_channel.return_value = channel client = texttospeech_v1beta1.TextToSpeechClient() # Setup request input_ = {} voice = {} audio_config = {} with pytest.raises(CustomException): client.synthesize_speech(input_, voice, audio_config)
33.55
79
0.674473
acedbd88091a88703b2bc3ce952f9eba6dd914d1
1,114
py
Python
share/rpcuser/rpcuser.py
brocoinofficial/brocoinofficial
b4c77b228e52b18e3f315585e18088828c5151fd
[ "MIT" ]
null
null
null
share/rpcuser/rpcuser.py
brocoinofficial/brocoinofficial
b4c77b228e52b18e3f315585e18088828c5151fd
[ "MIT" ]
null
null
null
share/rpcuser/rpcuser.py
brocoinofficial/brocoinofficial
b4c77b228e52b18e3f315585e18088828c5151fd
[ "MIT" ]
1
2021-11-18T16:16:26.000Z
2021-11-18T16:16:26.000Z
#!/usr/bin/env python2 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import hashlib import sys import os from random import SystemRandom import base64 import hmac if len(sys.argv) < 2: sys.stderr.write('Please include username as an argument.\n') sys.exit(0) username = sys.argv[1] #This uses os.urandom() underneath cryptogen = SystemRandom() #Create 16 byte hex salt salt_sequence = [cryptogen.randrange(256) for i in range(16)] hexseq = list(map(hex, salt_sequence)) salt = "".join([x[2:] for x in hexseq]) #Create 32 byte b64 password password = base64.urlsafe_b64encode(os.urandom(32)) digestmod = hashlib.sha256 if sys.version_info.major >= 3: password = password.decode('utf-8') digestmod = 'SHA256' m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod) result = m.hexdigest() print("String to be appended to brocoin.conf:") print("rpcauth="+username+":"+salt+"$"+result) print("Your password:\n"+password)
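# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original script, Python 3): the
# rpcauth line is simply hex(HMAC-SHA256(salt, password)). With a fixed,
# purely illustrative salt/password/username the whole computation is:
example_user = "myuser"
example_salt = "cb77f0957de88ff388cf817ddbc7273b"                # 16-byte hex salt (example)
example_pass = "9zYsV8jCN7hTzSsEcqgKvS0pGl0yUyyNzwwrlVGLnDY="    # 32-byte b64 password (example)
example_hmac = hmac.new(bytearray(example_salt, 'utf-8'),
                        bytearray(example_pass, 'utf-8'), 'SHA256').hexdigest()
print("rpcauth="+example_user+":"+example_salt+"$"+example_hmac)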
26.52381
79
0.728007
acedbd9e93e065bdbc6a14763c92f01b3a4f1233
3,650
py
Python
keras2onnx/ke2onnx/upsample.py
mgoldchild/keras-onnx
8e700572b89a907ca21a3096556f64b62b7aa76c
[ "MIT" ]
1
2021-07-23T21:01:37.000Z
2021-07-23T21:01:37.000Z
keras2onnx/ke2onnx/upsample.py
souptc/keras-onnx
c08d52bf4d4ec2bba69ec4ffd2ea14f47fecb1f5
[ "MIT" ]
null
null
null
keras2onnx/ke2onnx/upsample.py
souptc/keras-onnx
c08d52bf4d4ec2bba69ec4ffd2ea14f47fecb1f5
[ "MIT" ]
1
2020-10-01T09:26:58.000Z
2020-10-01T09:26:58.000Z
############################################################################### # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. ############################################################################### import collections from ..common.onnx_ops import apply_transpose, apply_upsample from .common import get_permutation_config def convert_keras_upsample(scope, operator, container, n_dims): op = operator.raw_operator # op.size type is tuple in keras.io, even if we set a int in keras.layers API. # op.size type can be int in tf.keras. op_size = op.size if isinstance(op.size, collections.Iterable) else [op.size] scales_sub = list(d for d in op_size) if n_dims == 1: shape_gap = len(op.input_shape) - len(scales_sub) if shape_gap == 1: scales = [1] + scales_sub elif shape_gap == 2: scales = [1] + scales_sub + [1] else: raise ValueError('shape_gap should be 1 or 2 for UpSampling1D') elif n_dims == 2 or n_dims == 3: # Always create the list of sampling factors in channels_first format because the input will be converted into # channels_first if it's in channels_last scales = [1, 1] + list(d for d in op_size) else: raise ValueError('Unsupported dimension %s when converting Keras Upsampling layer' % n_dims) mode = 'nearest' if hasattr(op, 'interpolation'): mode = 'linear' if op.interpolation.endswith('linear') else op.interpolation # Derive permutation configuration. If the Keras input format is not channels_first, this configuration may be used # to manipulate the input and output of ONNX Upsample. input_perm_axes, output_perm_axes = get_permutation_config(n_dims) channels_first = n_dims > 1 and op.data_format == 'channels_first' no_permutation_required = channels_first or n_dims < 2 # Before creating the main Upsample operator, we need to permute the input tensor if the original operator is # working under channels_last mode. if no_permutation_required: # No permutation is required. Use input as it is. input_tensor_name = operator.inputs[0].full_name else: # Permute the original input and then use the permuted result as the input of ONNX Upsample input_tensor_name = scope.get_unique_variable_name(operator.inputs[0].full_name + '_permuted') apply_transpose(scope, operator.inputs[0].full_name, input_tensor_name, container, perm=input_perm_axes) # If no_permutation_required is True, we don't need to permute the output of ONNX Upsample. Otherwise, similar to Crop's # conversion, a Transpose would be added. if no_permutation_required: apply_upsample(scope, input_tensor_name, operator.outputs[0].full_name, container, mode=mode, scales=scales) else: upsampled_tensor_name = scope.get_unique_variable_name(input_tensor_name + '_upsampled') apply_upsample(scope, input_tensor_name, upsampled_tensor_name, container, mode=mode, scales=scales) apply_transpose(scope, upsampled_tensor_name, operator.outputs[0].full_name, container, perm=output_perm_axes) def convert_keras_upsample_1d(scope, operator, container): convert_keras_upsample(scope, operator, container, n_dims=1) def convert_keras_upsample_2d(scope, operator, container): convert_keras_upsample(scope, operator, container, n_dims=2) def convert_keras_upsample_3d(scope, operator, container): convert_keras_upsample(scope, operator, container, n_dims=3)
50.694444
124
0.700822
acedbdf8078ef274a75f682133bc33dbdfa1e77a
636
py
Python
airbyte-integrations/bases/base-python/base_python/cdk/streams/auth/core.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
6,215
2020-09-21T13:45:56.000Z
2022-03-31T21:21:45.000Z
airbyte-integrations/bases/base-python/base_python/cdk/streams/auth/core.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
8,448
2020-09-21T00:43:50.000Z
2022-03-31T23:56:06.000Z
airbyte-integrations/bases/base-python/base_python/cdk/streams/auth/core.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
1,251
2020-09-20T05:48:47.000Z
2022-03-31T10:41:29.000Z
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # from abc import ABC, abstractmethod from typing import Any, Mapping class HttpAuthenticator(ABC): """ Base abstract class for various HTTP Authentication strategies. Authentication strategies are generally expected to provide security credentials via HTTP headers. """ @abstractmethod def get_auth_header(self) -> Mapping[str, Any]: """ :return: A dictionary containing all the necessary headers to authenticate. """ class NoAuth(HttpAuthenticator): def get_auth_header(self) -> Mapping[str, Any]: return {}
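# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal concrete
# strategy built on the abstract base above. The Bearer header shape is the
# common OAuth2 convention, not something this module mandates.
class StaticTokenAuthenticator(HttpAuthenticator):
    def __init__(self, token: str):
        self._token = token

    def get_auth_header(self) -> Mapping[str, Any]:
        return {"Authorization": f"Bearer {self._token}"}

# headers = StaticTokenAuthenticator("my-api-key").get_auth_header()
# requests.get(url, headers=headers)   # hypothetical call site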
24.461538
107
0.698113
acedbe798785a15aad9d58a6864b985b9343bfa9
12,502
py
Python
scalabel/eval/detect.py
drorvt/scalabel
5e60e9b75d83ffa560410e2185a87162ce5c121d
[ "Apache-2.0" ]
null
null
null
scalabel/eval/detect.py
drorvt/scalabel
5e60e9b75d83ffa560410e2185a87162ce5c121d
[ "Apache-2.0" ]
null
null
null
scalabel/eval/detect.py
drorvt/scalabel
5e60e9b75d83ffa560410e2185a87162ce5c121d
[ "Apache-2.0" ]
null
null
null
"""Detection evaluation code. The prediction and ground truth are expected in scalabel format. The evaluation results are from the COCO toolkit. """ import argparse import copy import json from functools import partial from multiprocessing import Pool from typing import AbstractSet, Callable, Dict, List, Optional import numpy as np from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval # type: ignore from ..common.io import open_write_text from ..common.logger import logger from ..common.parallel import NPROC from ..common.typing import DictStrAny from ..label.coco_typing import GtType from ..label.io import load, load_label_config from ..label.to_coco import scalabel2coco_detection from ..label.typing import Config, Frame from .result import OVERALL, Result, Scores, ScoresList class DetResult(Result): """The class for bounding box detection evaluation results.""" AP: List[Dict[str, float]] AP50: List[Dict[str, float]] AP75: List[Dict[str, float]] APs: List[Dict[str, float]] APm: List[Dict[str, float]] APl: List[Dict[str, float]] AR1: List[Dict[str, float]] AR10: List[Dict[str, float]] AR100: List[Dict[str, float]] ARs: List[Dict[str, float]] ARm: List[Dict[str, float]] ARl: List[Dict[str, float]] def __init__(self, **data: ScoresList) -> None: """Set extra parameters.""" super().__init__(**data) self._formatters = { metric: "{:.1f}".format for metric in self.__fields__ } # pylint: disable=useless-super-delegation def __eq__(self, other: "Result") -> bool: # type: ignore """Check whether two instances are equal.""" return super().__eq__(other) def summary( self, include: Optional[AbstractSet[str]] = None, exclude: Optional[AbstractSet[str]] = None, ) -> Scores: """Convert the data into a flattened dict as the summary.""" summary_dict = super().summary(include, exclude) for scores in self.AP: for category, score in scores.items(): if category == OVERALL: continue summary_dict["{}/{}".format("AP", category)] = score return summary_dict class COCOV2(COCO): # type: ignore """Modify the COCO API to support annotations dictionary as input.""" def __init__( self, annotation_file: Optional[str] = None, annotations: Optional[GtType] = None, ) -> None: """Init.""" super().__init__(annotation_file) # initialize the annotations in COCO format without saving as json. 
if annotation_file is None: assert isinstance( annotations, dict ), "annotation file format {} not supported".format( type(annotations) ) self.dataset = annotations self.createIndex() class COCOevalV2(COCOeval): # type: ignore """Modify the COCOeval API to speedup and suppress the printing.""" def __init__( self, cat_names: List[str], cocoGt: Optional[COCO] = None, cocoDt: Optional[COCO] = None, iouType: str = "segm", nproc: int = NPROC, ): """Init.""" super().__init__(cocoGt=cocoGt, cocoDt=cocoDt, iouType=iouType) self.cat_names = cat_names self.nproc = nproc max_dets = self.params.maxDets # type: ignore self.get_score_funcs: Dict[ str, Callable[[Optional[int]], float] ] = dict( AP=self.get_score, AP50=partial( self.get_score, metric="precision", iou_thr=0.5, max_dets=max_dets[2], ), AP75=partial( self.get_score, metric="precision", iou_thr=0.75, max_dets=max_dets[2], ), APs=partial( self.get_score, metric="precision", area_rng="small", max_dets=max_dets[2], ), APm=partial( self.get_score, metric="precision", area_rng="medium", max_dets=max_dets[2], ), APl=partial( self.get_score, metric="precision", area_rng="large", max_dets=max_dets[2], ), AR1=partial(self.get_score, metric="recall", max_dets=max_dets[0]), AR10=partial( self.get_score, metric="recall", max_dets=max_dets[1] ), AR100=partial( self.get_score, metric="recall", max_dets=max_dets[2] ), ARs=partial( self.get_score, metric="recall", area_rng="small", max_dets=max_dets[2], ), ARm=partial( self.get_score, metric="recall", area_rng="medium", max_dets=max_dets[2], ), ARl=partial( self.get_score, metric="recall", area_rng="large", max_dets=max_dets[2], ), ) def evaluate(self) -> None: """Run per image evaluation on given images.""" p = self.params # type: ignore # add backward compatibility if useSegm is specified in params p.imgIds = list(np.unique(p.imgIds)) if p.useCats: p.catIds = list(np.unique(p.catIds)) p.maxDets = sorted(p.maxDets) self.params = p self._prepare() # loop through images, area range, max detection number cat_ids = p.catIds if p.useCats else [-1] self.ious = { (imgId, catId): self.computeIoU(imgId, catId) for imgId in p.imgIds for catId in cat_ids } if self.nproc > 1: with Pool(self.nproc) as pool: to_updates: List[Dict[int, DictStrAny]] = pool.map( self.compute_match, range(len(p.imgIds)) ) else: to_updates = list(map(self.compute_match, range(len(p.imgIds)))) eval_num = len(p.catIds) * len(p.areaRng) * len(p.imgIds) self.evalImgs: List[DictStrAny] = [{} for _ in range(eval_num)] for to_update in to_updates: for ind, item in to_update.items(): self.evalImgs[ind] = item self._paramsEval = copy.deepcopy(self.params) def compute_match(self, img_ind: int) -> Dict[int, DictStrAny]: """Compute matching results for each image.""" p = self.params area_num = len(p.areaRng) img_num = len(p.imgIds) to_updates: Dict[int, DictStrAny] = {} for cat_ind, cat_id in enumerate(p.catIds): for area_ind, area_rng in enumerate(p.areaRng): eval_ind: int = ( cat_ind * area_num * img_num + area_ind * img_num + img_ind ) to_updates[eval_ind] = self.evaluateImg( p.imgIds[img_ind], cat_id, area_rng, p.maxDets[-1] ) return to_updates def get_score( self, cat_id: Optional[int], metric: str = "precision", iou_thr: Optional[float] = None, area_rng: str = "all", max_dets: int = 100, ) -> float: """Extract the score according the metric and category.""" p = self.params aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == area_rng] mind = [i for i, mDet in enumerate(p.maxDets) if mDet == max_dets] s = self.eval[metric] cat_ids = 
np.array(p.catIds) if iou_thr is not None: t = np.where(iou_thr == p.iouThrs)[0] s = s[t] if metric == "precision": # dimension of precision: [TxRxKxAxM] if cat_id is not None: k = np.where(cat_id == cat_ids)[0] s = s[:, :, k, aind, mind] else: s = s[:, :, :, aind, mind] elif metric == "recall": # dimension of recall: [TxKxAxM] if cat_id is not None: k = np.where(cat_id == cat_ids)[0] s = s[:, k, aind, mind] else: s = s[:, :, aind, mind] else: raise NotImplementedError if len(s[s > -1]) == 0: mean_s = float("nan") else: mean_s = np.mean(s[s > -1]) return mean_s * 100 def summarize(self) -> DetResult: """Compute summary metrics for evaluation results.""" cat_ids = self.params.catIds + [None] res_dict = { metric: [ { cat_name: get_score_func(cat_id) for cat_name, cat_id in zip(self.cat_names, cat_ids) }, {OVERALL: get_score_func(None)}, ] for metric, get_score_func in self.get_score_funcs.items() } return DetResult(**res_dict) def evaluate_det( ann_frames: List[Frame], pred_frames: List[Frame], config: Config, nproc: int = NPROC, with_logs: bool = True, ) -> DetResult: """Load the ground truth and prediction results. Args: ann_frames: the ground truth annotations in Scalabel format pred_frames: the prediction results in Scalabel format. config: Metadata config. nproc: the number of process. with_logs: whether to print logs Returns: DetResult: rendered eval results. Example usage: evaluate_det( "/path/to/gts", "/path/to/results", "/path/to/cfg", nproc=4, ) """ # Convert the annotation file to COCO format ann_frames = sorted(ann_frames, key=lambda frame: frame.name) ann_coco = scalabel2coco_detection(ann_frames, config) coco_gt = COCOV2(None, ann_coco) # Load results and convert the predictions pred_frames = sorted(pred_frames, key=lambda frame: frame.name) pred_res = scalabel2coco_detection(pred_frames, config)["annotations"] coco_dt = coco_gt.loadRes(pred_res) cat_ids = coco_dt.getCatIds() cat_names = [cat["name"] for cat in coco_dt.loadCats(cat_ids)] img_ids = sorted(coco_gt.getImgIds()) ann_type = "bbox" coco_eval = COCOevalV2(cat_names, coco_gt, coco_dt, ann_type, nproc) coco_eval.params.imgIds = img_ids if with_logs: logger.info("evaluating...") coco_eval.evaluate() if with_logs: logger.info("accumulating...") coco_eval.accumulate() result = coco_eval.summarize() return result def parse_arguments() -> argparse.Namespace: """Parse the arguments.""" parser = argparse.ArgumentParser(description="Detection evaluation.") parser.add_argument( "--gt", "-g", required=True, help="path to detection ground truth" ) parser.add_argument( "--result", "-r", required=True, help="path to detection results" ) parser.add_argument( "--config", "-c", default=None, help="Path to config toml file. Contains definition of categories, " "and optionally attributes and resolution. 
For an example "
        "see scalabel/label/configs.toml",
    )
    parser.add_argument(
        "--out-file",
        default="",
        help="Output file for detection evaluation results.",
    )
    parser.add_argument(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for detection evaluation",
    )
    parser.add_argument(
        "--quiet",
        "-q",
        action="store_true",
        help="without logging",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_arguments()
    dataset = load(args.gt, args.nproc)
    gts, cfg = dataset.frames, dataset.config
    preds = load(args.result).frames
    if args.config is not None:
        cfg = load_label_config(args.config)
    assert cfg is not None
    eval_result = evaluate_det(gts, preds, cfg, args.nproc, not args.quiet)
    logger.info(eval_result)
    logger.info(eval_result.summary())
    if args.out_file:
        with open_write_text(args.out_file) as fp:
            json.dump(eval_result.json(), fp)
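
# Illustrative usage sketch: the same evaluation flow as the __main__ block
# above, driven from Python instead of the command line. The file paths are
# hypothetical placeholders, not real data.
def _example_programmatic_eval() -> None:
    gt_dataset = load("/path/to/gt.json", NPROC)
    pred_frames = load("/path/to/pred.json").frames
    label_cfg = gt_dataset.config
    if label_cfg is None:
        label_cfg = load_label_config("/path/to/config.toml")
    det_result = evaluate_det(
        gt_dataset.frames, pred_frames, label_cfg, nproc=NPROC
    )
    logger.info(det_result)  # same pretty-printed result as the CLI above
    logger.info(det_result.summary())  # flat dict of scores, e.g. "AP/<category>"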
32.388601
79
0.56399
acedbef207c0eeab780f14e9dc897ccd1b315ef1
4,538
py
Python
regression_tests/tools/tool.py
mehrdad-shokri/retdec-regression-tests-framework
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
[ "MIT" ]
21
2017-12-12T20:38:43.000Z
2019-04-14T12:46:10.000Z
regression_tests/tools/tool.py
mehrdad-shokri/retdec-regression-tests-framework
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
[ "MIT" ]
6
2018-01-06T13:32:23.000Z
2018-09-14T15:09:11.000Z
regression_tests/tools/tool.py
mehrdad-shokri/retdec-regression-tests-framework
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
[ "MIT" ]
11
2017-12-12T20:38:46.000Z
2018-07-19T03:12:03.000Z
""" A representation of a generic tool that has run. """ import re from regression_tests.utils import memoize from regression_tests.parsers.text_parser import Text class Tool: """A representation of a generic tool that has run.""" def __init__(self, name, dir, args, cmd_runner, output, return_code, timeouted): """ :param str name: Name of the tool. :param Directory dir: Base directory for the outputs of the tool. :param ToolArguments args: Arguments of the tool. :param CmdRunner cmd_runner: Runner of external commands. :param str output: Output from the tool. :param int return_code: Return code of the tool. :param bool timeouted: Has the tool timeouted? """ self._name = name self._dir = dir self._args = args self._cmd_runner = cmd_runner self._output = output self._return_code = return_code self._timeouted = timeouted @property def name(self): """Name of the tool (`str`).""" return self._name @property def safe_name(self): """Safe name of the tool (`str`). The returned name is non-empty, starts with a letter or underscore, and contains just characters from range ``[a-zA-Z0-9_]``. Other characters are replaced with underscores. Examples: .. code-block:: python Tool(name='Tool', ...).safe_name == 'Tool' Tool(name='test me.py', ...).safe_name == 'test_me_py' Tool(name='9tool', ...).safe_name == '_9tool' Tool(name='', ...).safe_name == '_' """ safe_name = re.sub('[^a-zA-Z0-9_]', '_', self._name) if not re.match(r'^[a-zA-Z_]', safe_name): safe_name = '_' + safe_name return safe_name @property def dir(self): """Base directory for the outputs of the tool (:class:`.Directory`). """ return self._dir @property def args(self): """Arguments of the tool (:class:`.ToolArguments`). """ return self._args @property def return_code(self): """Return code of the tool (`int`).""" return self._return_code @property def succeeded(self): """Has the tool succeeded?""" return self.return_code == 0 @property def failed(self): """Has the tool failed?""" return not self.succeeded @property @memoize def output(self): """Output from the tool (:class:`.Text`).""" return Text(self._output) def end_of_output(self, lines=10): """Returns the last `lines` from the output.""" return self._end_of(self.output, lines) @property def timeouted(self): """Has the tool timeouted?""" return self._timeouted @property def input_files(self): """A tuple of input files (:class:`.File`). If there was only a single file, it returns a singleton tuple. If the tool had no input files, it returns the empty tuple. """ return self.args.input_files @property def log_file_name(self): """Name of the log file.""" # We can name the log file after the input file, provided that there is # only a single input file. Otherwise, we use just the tool name. if len(self.args.input_files) == 1: base = self.args.input_files[0].name else: base = self.name return '{}.log'.format(base) @property def log_file(self): """Log file.""" return self._get_file(self.log_file_name) @property @memoize def log(self): """Contents of the log file.""" return self.log_file.text def end_of_log(self, lines=10): """Returns the last `lines` from the log.""" return self._end_of(self.log, lines) def _run_cmd(self, *args, **kwargs): """Runs the given command with the given arguments by passing it to the command runner. 
""" return self._cmd_runner.run_cmd(*args, **kwargs) def _get_file(self, name): """Returns the file with the given name.""" return self.dir.get_file(name) def _file_exists(self, name): """Does a file with the given name exist?""" return self.dir.file_exists(name) def _end_of(self, output, lines): """Returns the last `lines` from `output`.""" return '\n'.join(output.split('\n')[-lines:])
29.467532
79
0.586602
acedc10216df4ecc41e5108dcd0076b4d176a568
18,658
py
Python
utils/corpus_utils.py
fareise/NeuronBlocks
725812d1aaa195d04d1488a689fada4bc0064702
[ "MIT" ]
1
2019-06-29T08:38:13.000Z
2019-06-29T08:38:13.000Z
utils/corpus_utils.py
BarryZM/NeuronBlocks
644ae96a040502821c7a899933945cefee65c7c8
[ "MIT" ]
null
null
null
utils/corpus_utils.py
BarryZM/NeuronBlocks
644ae96a040502821c7a899933945cefee65c7c8
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. from __future__ import absolute_import import string import sys import numpy as np import logging import math import warnings warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim') # remove gensim warning from gensim.models.keyedvectors import KeyedVectors from gensim.models.fasttext import FastText import codecs import copy from settings import ProblemTypes import torch if sys.version_info < (3,): maketrans = string.maketrans else: maketrans = str.maketrans def load_embedding(embedding_path, embedding_dim, format, file_type, with_head=False, word_set=None): """ Args: format: 'glove', 'word2vec', 'fasttext' file_type: 'text' or 'binary' """ embedding_dict = dict() if format == 'word2vec' or format == 'fasttext': if file_type == 'text': vector_total = KeyedVectors.load_word2vec_format(embedding_path, binary=False, unicode_errors='ignore') else: if format == 'word2vec': vector_total = KeyedVectors.load_word2vec_format(embedding_path, binary=True, unicode_errors='ignore') elif format == 'fasttext': vector_total = FastText.load_fasttext_format(embedding_path, encoding='utf8') assert vector_total.vector_size == embedding_dim if word_set is None: embedding_dict = vector_total else: if not (format == 'fasttext' and file_type == 'binary'): word_total = vector_total.index2word # actually, vector_total.index2word is the word list else: word_total = vector_total.wv.index2word for word in word_total: if word in word_set: embedding_dict[word] = vector_total[word] elif format == 'glove': with codecs.open(embedding_path, 'r', encoding='utf-8') as fin: if with_head == True: _ = fin.readline() for idx, line in enumerate(fin): line = line.rstrip() if idx == 0 and len(line.split()) == 2: continue if len(line) > 0: word, vec = line.split(" ", 1) if (word_set and word in word_set) or (word_set is None): vector = [float(num) for num in vec.split(" ")] assert len(vector) == embedding_dim embedding_dict[word] = vector else: raise Exception('The format supported are glove, word2vec or fasttext, dost not support %s now.' % format) return embedding_dict def split_array(arr, n, small_chunk_threshold=0): """split the array, each chunk has n elements (the last chunk might be different) Args: arr: the list to chunk, can be python list or numpy array n: number of elements in a chunk small_chunk_threshold: chunks have less than {small_chunk_threshold} elements are forbiddened. if the last chunk has less than {small_chunk_threshold} elements, merge them into the former chunk. """ result = [arr[i:i + n] for i in range(0, len(arr), n)] #namely, small_chunk_threshold = 0 if len(result[-1]) < small_chunk_threshold: if isinstance(result[-2], np.ndarray) == True: result[-2] = result[-2].tolist() result[-2].extend(result[-1]) #result[-1] can be either python list or np.ndarray result[-2] = np.array(result[-2]) logging.debug("The last chunk of size %d is smaller than the small_chunk_threshold %d, so merge it to chunk[-2]" % (len(result[-1]), small_chunk_threshold)) logging.debug("Now the size of chunk[-2] is increase from %d to %d" % (n, len(result[-2]))) del result[-1] return result def split_array_averagely(arr, m): """ split the array to n small chunks with nearly the same sizes. 
Args: arr: m: Returns: """ n = int(math.ceil(len(arr) / float(m))) return [arr[i:i + n] for i in range(0, len(arr), n)] def cut_and_padding(seq, max_len, pad=0): """ cut or pad the sequence to fixed size Args: seq: sequence max_len: the fixed size specified pad: symbol to pad Returns: fixed size sequence """ if len(seq) >= max_len: return seq[:max_len] else: return seq + [pad] * (max_len - len(seq)) def to_categorical(y, nb_classes=None): '''Convert class vector (integers from 0 to nb_classes) to binary class matrix, for use with categorical_crossentropy. ''' if not nb_classes: nb_classes = np.max(y)+1 Y = np.zeros((len(y), nb_classes)) for i in range(len(y)): Y[i, y[i]] = 1. return Y def base_filter(): f = string.punctuation f = f.replace("'", '') f += '\t\n' return f def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "): '''prune: sequence of characters to filter out ''' if lower: text = text.lower() text = text.translate(maketrans(filters, split*len(filters))) seq = text.split(split) return [_f for _f in seq if _f] def corpus_permutation(*corpora): """ Args: *corpora: different fields of a corpus Returns: """ logging.info("Start permutation") perm = np.random.permutation(len(corpora[0])) corpora_perm = [] for i in range(len(corpora)): corpora_perm.append(np.array(corpora[i])[perm]) logging.info("Permutation end!") return corpora_perm def get_batches(problem, data, length, target, batch_size, input_types, pad_ids=None, permutate=False, transform_tensor=True, predict_mode='batch'): """ Args: data: { 'string1': { 'word1': [...], 'postage_feature1': [..] } 'string2': { 'word1': [...], 'postage_feature1': [..] } lengths: { 'string1': [...], 'string2': [...] } target: [...] input_types: { "word": { "cols": [ "word1", "word2" ], "dim": 300 }, "postag": { "cols": ["postag_feature1", "postag_feature2"], "dim": 20 } permutate: shuffle data transform_tensor: if True the data returned would be Variables in CPU (except sentence length), otherwise the data would be numpy array Returns: data_batches: each element is a batch of data [ { "string1":{ 'word': ndarray/Variable, shape:[batch_size, seq_len], 'postag': ndarray/Variable, postag ids, shape: [batch_size, seq_len], ... } "string2":{ 'word': ndarray/Variable, shape:[batch_size, seq_len], 'postag': ndarray/Variable, postag ids, shape: [batch_size, seq_len], ... } } ... ] length_batches: { 'string1": ndarray, [number of batches, batch size] 'string2": ndarray, [number of batches, batch size] } target_batches: ndarray/Variable shape: [number of batches, batch_size, targets] """ if predict_mode == 'batch': logging.info("Start making batches") if permutate is True: #CAUTION! 
data and length would be revised data = copy.deepcopy(data) length = copy.deepcopy(length) if target is not None: target = copy.deepcopy(target) # shuffle the data permutation = np.random.permutation(len(list(target.values())[0])) for input_cluster in data: for input in data[input_cluster]: data[input_cluster][input] = np.array(data[input_cluster][input])[permutation] for single_type in length[input_cluster]: length[input_cluster][single_type] = np.array(length[input_cluster][single_type])[permutation] if target is not None: for single_target in target: length['target'][single_target] = np.array(length['target'][single_target])[permutation] target[single_target] = np.array(target[single_target])[permutation] else: for input_cluster in data: for input in data[input_cluster]: data[input_cluster][input] = np.array(data[input_cluster][input]) for single_type in length[input_cluster]: length[input_cluster][single_type] = np.array(length[input_cluster][single_type]) if target is not None: for single_target in target: length['target'][single_target] = np.array(length['target'][single_target]) target[single_target] = np.array(target[single_target]) # set up padding symbols for inputs and target if pad_ids is None: pad_ids = dict() for branch in input_types: pad_ids[branch] = problem.input_dicts[branch].id('<pad>') if ProblemTypes[problem.problem_type] == ProblemTypes.sequence_tagging: #pad_ids['target'] = problem.output_dict.id('O') if problem.target_with_pad: pad_ids['target'] = problem.output_dict.id('<pad>') else: pad_ids['target'] = 0 # CAUTION elif ProblemTypes[problem.problem_type] == ProblemTypes.classification: if problem.target_with_pad: pad_ids['target'] = problem.output_dict.id('<pad>') # CAUTION else: pad_ids['target'] = 0 # CAUTION elif ProblemTypes[problem.problem_type] == ProblemTypes.regression: pad_ids['target'] = None elif ProblemTypes[problem.problem_type] == ProblemTypes.mrc: pass type2cluster = dict() # type2cluster['word1'] = 'word' for input_type in input_types: for col_name in input_types[input_type]['cols']: type2cluster[col_name] = input_type # get the corpus size for input_cluster in data: for input_type in data[input_cluster]: corpus_size = len(data[input_cluster][input_type]) break break data_batches = [] if target is not None: target_batches = [] else: target_batches = None length_batches = [] for stidx in range(0, corpus_size, batch_size): data_batch = dict() length_batch = dict() for input_cluster in data: data_batch[input_cluster] = dict() length_batch[input_cluster] = dict() max_sen_len_cur_batch = None max_word_len_cur_batch = None if transform_tensor is True: # For nn.DataParallel, the length must be Variable as well, otherwise the length would not split for multiple GPUs #length_batch[input_cluster] = Variable(torch.LongTensor(length[input_cluster][stidx: stidx + batch_size])) for single_input_cluster in length[input_cluster]: if not isinstance(length[input_cluster][single_input_cluster][0], list): length_batch[input_cluster][single_input_cluster] = \ torch.LongTensor(np.array(length[input_cluster][single_input_cluster][stidx: stidx + batch_size])) else: length_batch[input_cluster][single_input_cluster] = [] for single_iterm in length[input_cluster][single_input_cluster][stidx: stidx + batch_size]: length_batch[input_cluster][single_input_cluster].append(torch.LongTensor(np.array(single_iterm))) else: for single_input_cluster in length[input_cluster]: length_batch[input_cluster][single_input_cluster] = \ 
np.array(length[input_cluster][single_input_cluster][stidx: stidx + batch_size]) # max_len_cur_batch = np.sort(length[input_cluster][stidx: stidx + batch_size])[-1] for single_input_cluster in length[input_cluster]: if 'sentence' in single_input_cluster: max_sen_len_cur_batch = np.sort(length[input_cluster][single_input_cluster][stidx: stidx + batch_size])[-1] elif 'word' in single_input_cluster: max_word_len_cur_batch = np.sort([y for x in length[input_cluster][single_input_cluster][stidx: stidx + batch_size] for y in x])[-1] #logging.info("stidx: %d, max_len: %d" % (stidx, max_len_cur_batch)) for input_type in data[input_cluster]: if input_type in type2cluster: batch_with_pad = [] # process char data if 'char' in input_type.lower(): for seq in data[input_cluster][input_type][stidx: stidx + batch_size]: batch_char_pad = [] for seq_index in range(max_sen_len_cur_batch): if seq_index < len(seq): batch_char_pad.append(cut_and_padding(seq[seq_index], max_word_len_cur_batch, pad_ids[type2cluster[input_type]])) else: batch_char_pad.append(cut_and_padding([pad_ids[type2cluster[input_type]]], max_word_len_cur_batch, pad_ids[type2cluster[input_type]])) batch_with_pad.append(batch_char_pad) else: for seq in data[input_cluster][input_type][stidx: stidx + batch_size]: #batch_with_pad.append(cut_and_padding(seq, max_len_cur_batch, pad_ids[input_type])) batch_with_pad.append(cut_and_padding(seq, max_sen_len_cur_batch, pad_ids[type2cluster[input_type]])) if transform_tensor is True: data_batch[input_cluster][type2cluster[input_type]] = torch.LongTensor(batch_with_pad) else: data_batch[input_cluster][type2cluster[input_type]] = np.array(batch_with_pad) else: data_batch[input_cluster][input_type] = data[input_cluster][input_type][stidx: stidx + batch_size] # word_length is used for padding char sequence, now only save sentence_length length_batch[input_cluster] = length_batch[input_cluster]['sentence_length'] data_batches.append(data_batch) length_batches.append(length_batch) if target is not None: target_batch = {} length_batch['target'] = {} for single_target in target: if transform_tensor is True: length_batch['target'][single_target] = torch.LongTensor(np.array(length['target'][single_target][stidx: stidx + batch_size])) else: length_batch['target'][single_target] = np.array(length['target'][single_target][stidx: stidx + batch_size]) if not (isinstance(target[single_target][0], list) or isinstance(target[single_target][0], np.ndarray)): target_batch[single_target] = target[single_target][stidx: stidx + batch_size] else: # target is also a sequence, padding needed temp_target_batch = [] for seq in target[single_target][stidx: stidx + batch_size]: temp_target_batch.append(cut_and_padding(seq, max_sen_len_cur_batch, pad_ids['target'])) target_batch[single_target] = temp_target_batch if transform_tensor is True: if ProblemTypes[problem.problem_type] == ProblemTypes.classification \ or ProblemTypes[problem.problem_type] == ProblemTypes.sequence_tagging: target_batch[single_target] = torch.LongTensor(target_batch[single_target]) elif ProblemTypes[problem.problem_type] == ProblemTypes.regression: target_batch[single_target] = torch.FloatTensor(target_batch[single_target]) elif ProblemTypes[problem.problem_type] == ProblemTypes.mrc: if not isinstance(target_batch[single_target][0], str): target_batch[single_target] = torch.LongTensor(target_batch[single_target]) else: target_batch[single_target] = np.array(target_batch[single_target]) target_batches.append(target_batch) if predict_mode == 'batch': 
logging.info("Batches got!") return data_batches, length_batches, target_batches def get_seq_mask(seq_len, max_seq_len=None): """ Args: seq_len (ndarray): 1d numpy array/list Returns: ndarray : 2d array seq_len_mask. the mask symbol for a real token is 1 and for <pad> is 0. """ if torch.is_tensor(seq_len): seq_len = seq_len.cpu().numpy() if max_seq_len is None: max_seq_len = seq_len.max() masks = np.array([[1]*seq_len[i] + [0] * (max_seq_len - seq_len[i]) for i in range(len(seq_len))]) return masks if __name__ == "__main__": ''' y = [1, 0, 1, 0] y_convert = to_categorical(y, 2) print(y_convert) ''' ''' load_embedding(r'/data/t-wulin/data/embeddings/glove/glove.840B.300d.txt', 300, 'glove', 'text', word_set=None) print('glove text loaded') load_embedding(r'/data/t-wulin/data/embeddings/GoogleNews-vectors-negative300.bin', 300, 'word2vec', 'binary', word_set=None) print('word2vec bin loaded') load_embedding(r'/data/t-wulin/data/embeddings/fasttext.wiki.en/wiki.en.bin', 300, 'fasttext', 'binary', word_set=None) print('fasttext bin loaded') load_embedding(r'/data/t-wulin/data/embeddings/fasttext.wiki.en/wiki.en.vec', 300, 'fasttext', 'text', word_set=None) print('fasttext text loaded') ''' load_embedding(r'/data/t-wulin/data/embeddings/fasttext.wiki.en/wiki.en.bin', 300, 'fasttext', 'binary', word_set=None) print('fasttext bin loaded')
42.21267
170
0.590524
acedc145955d5cc5a65a58bd3069677c38856054
5,012
py
Python
.vscode/extensions/batisteo.vscode-django-1.3.0/.venv/lib/python3.9/site-packages/poetry/core/packages/utils/link.py
cclint/dotfiles
8dbd1b768add3fdb68e749f965ef3025ee9de4ed
[ "MIT" ]
1
2020-08-07T16:09:57.000Z
2020-08-07T16:09:57.000Z
.vscode/extensions/batisteo.vscode-django-1.3.0/.venv/lib/python3.9/site-packages/poetry/core/packages/utils/link.py
cclint/dotfiles
8dbd1b768add3fdb68e749f965ef3025ee9de4ed
[ "MIT" ]
null
null
null
.vscode/extensions/batisteo.vscode-django-1.3.0/.venv/lib/python3.9/site-packages/poetry/core/packages/utils/link.py
cclint/dotfiles
8dbd1b768add3fdb68e749f965ef3025ee9de4ed
[ "MIT" ]
null
null
null
import posixpath import re from .utils import path_to_url from .utils import splitext try: import urllib.parse as urlparse except ImportError: import urlparse class Link: def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. """ # url can be a UNC windows share if url.startswith("\\\\"): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = " (requires-python:%s)" % self.requires_python else: rp = "" if self.comes_from: return "%s (from %s)%s" % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return "<Link %s>" % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urlparse.urlsplit(self.url) name = posixpath.basename(path.rstrip("/")) or netloc name = urlparse.unquote(name) assert name, "URL %r produced no filename" % self.url return name @property def scheme(self): return urlparse.urlsplit(self.url)[0] @property def netloc(self): return urlparse.urlsplit(self.url)[1] @property def path(self): return urlparse.unquote(urlparse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip("/"))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) return urlparse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)") @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0]) @property def is_wheel(self): return self.ext == ".whl" @property def is_wininst(self): return self.ext == ".exe" @property def is_egg(self): return self.ext == ".egg" @property def is_sdist(self): return self.ext in 
{".tar.bz2", ".tar.gz", ".zip"} @property def is_artifact(self): """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ if self.scheme in ["ssh", "git", "hg", "bzr", "sftp", "svn"]: return False return True
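
# Illustrative usage sketch: parsing a made-up wheel download URL with the
# Link helper above (the URL does not point to a real artifact).
def _link_example():
    link = Link(
        "https://files.example.org/packages/demo-1.2.3-py3-none-any.whl"
        "#sha256=0123456789abcdef&egg=demo"
    )
    assert link.filename == "demo-1.2.3-py3-none-any.whl"
    assert link.is_wheel
    assert link.egg_fragment == "demo"
    assert link.hash_name == "sha256"
    assert link.hash == "0123456789abcdef"
    return link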
28
81
0.573424
acedc2f1a5d23e14c51c0feb17de2a7098fa64b8
2,192
py
Python
Functions_Exerc/strings.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
Functions_Exerc/strings.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
Functions_Exerc/strings.py
ecanro/100DaysOfCode_Python
a86ebe5a793fd4743e0de87454ba76925efdd23d
[ "MIT" ]
null
null
null
# working with strings # function to sort frase def sort(str): print(f'Frase usando sorting: {str[num_sorts1:num_sorts2]}') # function to print frase 2 in 2 def print_f(str): print(str[::2]) print('Frase de 2 en 2: ') for e in str[::2]: print(f'{e}', end='') # functional def print_str(str, num_sorts1, jump): seq = str[num_sorts1::jump] print(f'a frase fica asim: {seq}') # sorting def sort_f(str): num_sort = int(input(f'Ingrese um numero entre 1 e {len(str)}: ')) print(f'Frase imprimiendo até posição {num_sort}: {str[:num_sort+1]}') print(f'Frase imprimiendo desde posição {num_sort}: {str[num_sort:]}') # for e in str[:num_sort]: # print(f'{e}', end="") # for e in str[num_sort:]: # print(f"{e}", end='') # main script str = input('Escreva uma frase: ') print(f'A frase é: {str}') # tamanho da frase print(f'O tamanho da frase e com len(): {len(str)}') #option2 using for loop qt = 0 for c in str: qt += 1 print(f'O tamanho da frase com for: {qt}') # Mayusculas, minusculas e capitalize # print('Frase Capitalize, title, mayusculas e minusculas: ') print(f'Frase usando capitalize: {str.capitalize()}, so imprime a primera Letra da frase en Maiusculas') print(f'Frase usando tittle: {str.title()}, capitaliza cada palavra da frase') print(f'Frase usando upper: {str.upper()}') print(f'Frase usando lower: {str.lower()}') # print(str.swapcase()) # .islower() or .isupper() num_sorts1 = int(input(f'Ingrese um valor entre 0 e {len(str)}: ')) num_sorts2 = int(input(f'Ingrese outro valor entre 0 e {len(str)}: ')) jump = int(input('Ingrese o valor do salto entre cada caracter: ')) # how many character occurs in the frase # for l in str: # if l == letter: # qt+=1 # find some word in the frase word = input('Ingrese una palavra a buscar na frase') position = str.find(word) if position == -1: print('Word not find in the frase: ') else: print(f'The word: {word} is find in the position: {position}') def count(str): qt = 0 for l in str: qt += 1 print(f'O tamanho da frase e: {qt}') sort(str) print_f(str) sort_f(str) print_str(str, num_sorts1, jump) count(str)
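
# Illustrative sketch: the same slicing operations the functions above rely
# on, applied to a fixed phrase instead of interactive input().
def _slicing_example():
    phrase = "hello world example"
    assert len(phrase) == 19
    assert phrase[0:5] == "hello"       # sort(): phrase[num_sorts1:num_sorts2]
    assert phrase[::2] == "hlowrdeape"  # print_f(): every 2nd character
    assert phrase[6::3] == "wleme"      # print_str(): start offset plus jump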
27.061728
104
0.649179
acedc4707f2bffad5d2f77c4aa432a19b65a263a
5,013
py
Python
tensor2tensor/data_generators/lang_robot.py
suraj-nair-1/tensor2tensor
8389506ff6ef3353ec3413e52e3f6e70416bbefe
[ "Apache-2.0" ]
null
null
null
tensor2tensor/data_generators/lang_robot.py
suraj-nair-1/tensor2tensor
8389506ff6ef3353ec3413e52e3f6e70416bbefe
[ "Apache-2.0" ]
null
null
null
tensor2tensor/data_generators/lang_robot.py
suraj-nair-1/tensor2tensor
8389506ff6ef3353ec3413e52e3f6e70416bbefe
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # t2t-trainer --schedule=train --alsologtostderr --generate_data --tmp_dir=/iris/u/surajn/workspace/language_offline_rl/sv2p/robot_lang_new/tmp --data_dir=/iris/u/surajn/workspace/language_offline_rl/sv2p/robot_lang_new/out --output_dir=/iris/u/surajn/workspace/language_offline_rl/sv2p/robot_lang_new/out --problem=lang_robot --model=next_frame_sv2p --hparams_set=next_frame_sv2p --train_steps=400000 --eval_steps=100 --hparams="video_num_input_frames=5, video_num_target_frames=15" """ Batch Exploration """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import video_utils from tensor2tensor.layers import modalities from tensor2tensor.utils import registry import h5py import tensorflow as tf # DATA_URL = ("/iris/u/asc8/taskexp/our-smm/exps/mean_block1/max_cms_seed0_block1_grads1/img_memory/0mem.hdf5") # just try this for now NUMEP = 50000 # Each im buffer has 500 eps: 5 trajectories of 10 steps each = 2500 trajectories EPLEN = 20 # Needs to be 50, should loop through 5 10-step trajs at a time @registry.register_problem class LangRobot(video_utils.VideoProblem): @property def num_channels(self): return 3 @property def frame_height(self): return 64 @property def frame_width(self): return 64 @property def is_generate_per_split(self): return True # num_hdf * 25000 (num of images per image memory hdf = NUMEP * EPLEN) @property def total_number_of_frames(self): return 50000*20 # Not sure if this is correct? 
We don't have videos def max_frames_per_video(self, hparams): return 20 @property def random_skip(self): return False @property def only_keep_videos_from_0th_frame(self): return False @property def use_not_breaking_batching(self): return True @property def extra_reading_spec(self): """Additional data fields to store on disk and their decoders.""" data_fields = { "frame_number": tf.FixedLenFeature([1], tf.int64), "action":tf.FixedLenFeature([5], tf.float32), } decoders = { "frame_number": tf.contrib.slim.tfexample_decoder.Tensor( tensor_key="frame_number"), "action": tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="action"), } return data_fields, decoders def hparams(self, defaults, unused_model_hparams): p = defaults p.modality = {"inputs": modalities.ModalityType.VIDEO, "action":modalities.ModalityType.REAL_L2_LOSS, "targets": modalities.ModalityType.VIDEO} p.vocab_size = {"inputs": 256, "action": 5, "targets": 256} def parse_frames(self, f, dataset_split): ims = f['sim']['ims'][:] acts = f['sim']['actions'][:] # ims = np.transpose(ims, (0, 1, 3, 4, 2)) # Should be (500, 50, 64, 64, 3) if dataset_split == problem.DatasetSplit.TRAIN: start_ep, end_ep = 0, int(NUMEP * 0.9) # 400 eps else: start_ep, end_ep = int(NUMEP * 0.9), NUMEP # 100 for ep in range(start_ep, end_ep): # goes from 0 to 399, each 50 step eps for step in range(EPLEN): # Go through the 50 steps of the episode frame = ims[ep, step] * 255. action = acts[ep, step] yield step, frame, action def generate_samples(self, data_dir, tmp_dir, dataset_split): # path = '/iris/u/surajn/workspace/language_offline_rl/data/may_07_sawyer_10k/data.hdf5' # path = '/iris/u/surajn/workspace/language_offline_rl/data/may_08_sawyer_50k/data.hdf5' path = '/iris/u/surajn/workspace/language_offline_rl/data/aug_24_sawyernew_50k/data.hdf5' f = h5py.File(path, "r") for frame_number, frame, action in self.parse_frames(f, dataset_split): # frame number needs to be 0, ..., 49 yield { "frame_number": [frame_number], "frame": frame, "action": action.tolist(), }
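
# Illustrative sketch: the episode/frame split sizes implied by parse_frames()
# above, given the NUMEP and EPLEN constants of this module.
def _split_sizes():
    train_eps = int(NUMEP * 0.9)       # 45000 episodes for the TRAIN split
    eval_eps = NUMEP - train_eps       # 5000 episodes for the other split
    train_frames = train_eps * EPLEN   # 900000 frames
    eval_frames = eval_eps * EPLEN     # 100000 frames
    assert train_frames + eval_frames == NUMEP * EPLEN  # total_number_of_frames
    return train_frames, eval_frames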
36.064748
483
0.664672
acedc4f76513d270c7298983e5ff98da4b44d740
55,724
py
Python
parser/team20/execution/AST/sentence.py
webdev188/tytus
847071edb17b218f51bb969d335a8ec093d13f94
[ "MIT" ]
35
2020-12-07T03:11:43.000Z
2021-04-15T17:38:16.000Z
parser/team20/execution/AST/sentence.py
webdev188/tytus
847071edb17b218f51bb969d335a8ec093d13f94
[ "MIT" ]
47
2020-12-09T01:29:09.000Z
2021-01-13T05:37:50.000Z
parser/team20/execution/AST/sentence.py
webdev188/tytus
847071edb17b218f51bb969d335a8ec093d13f94
[ "MIT" ]
556
2020-12-07T03:13:31.000Z
2021-06-17T17:41:10.000Z
class Sentence: ''' ''' class CreateDatabase(Sentence): def __init__(self, name, ifNotExistsFlag, OrReplace, OwnerMode): self.name = name self.ifNotExistsFlag = ifNotExistsFlag self.OrReplace = OrReplace self.OwnerMode = OwnerMode def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"CreateDatabase\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CREATE") + hash(self)) + '\n' dot += str(hash("CREATE") + hash(self)) + \ '[label=\"' + "CREATE" + '\"]\n' if(self.OrReplace): dot += str(hash(self)) + '->' + \ str(hash("OR") + hash(self)) + '\n' dot += str(hash("OR") + hash(self)) + \ '[label=\"' + "OR" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("REPLACE") + hash(self)) + '\n' dot += str(hash("REPLACE") + hash(self)) + \ '[label=\"' + "REPLACE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DATABASE") + hash(self)) + '\n' dot += str(hash("DATABASE") + hash(self)) + \ '[label=\"' + "DATABASE" + '\"]\n' if(self.ifNotExistsFlag): dot += str(hash(self)) + '->' + \ str(hash("IF") + hash(self)) + '\n' dot += str(hash("IF") + hash(self)) + \ '[label=\"' + "IF" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("NOT") + hash(self)) + '\n' dot += str(hash("NOT") + hash(self)) + \ '[label=\"' + "NOT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("EXISTS") + hash(self)) + '\n' dot += str(hash("EXISTS") + hash(self)) + \ '[label=\"' + "EXISTS" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' if(self.OwnerMode[0] != None or self.OwnerMode[1] != None): dot += str(hash(self)) + '->' + \ str(hash("ownerMode") + hash(self)) + '\n' dot += str(hash("ownerMode") + hash(self)) + \ '[label=\"' + "ownerMode" + '\"]\n' if(self.OwnerMode[0] != None): dot += str(hash("ownerMode") + hash(self)) + '->' + \ str(hash("OWNER") + hash("ownerMode") + hash(self)) + '\n' dot += str(hash("OWNER") + hash("ownerMode") + hash(self)) + \ '[label=\"' + "OWNER" + '\"]\n' dot += str(hash("ownerMode") + hash(self)) + '->' + \ str(hash(self.OwnerMode[0]) + hash("ownerMode") + hash(self)) + '\n' dot += str(hash(self.OwnerMode[0]) + hash("ownerMode") + hash(self)) + \ '[label=\"' + self.OwnerMode[0] + '\"]\n' if(self.OwnerMode[1] != None): dot += str(hash("ownerMode") + hash(self)) + '->' + \ str(hash("MODE") + hash("ownerMode") + hash(self)) + '\n' dot += str(hash("MODE") + hash("ownerMode") + hash(self)) + \ '[label=\"' + "MODE" + '\"]\n' dot+= self.OwnerMode[1].graphAST('',hash("ownerMode") + hash(self)) return dot class ShowDatabases(Sentence): '''''' def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ShowDatabases\"]\n' dot += str(hash(self)) + '->' + \ str(hash("SHOW") + hash(self)) + '\n' dot += str(hash("SHOW") + hash(self)) + \ '[label=\"' + "SHOW" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DATABASES") + hash(self)) + '\n' dot += str(hash("DATABASES") + hash(self)) + \ '[label=\"' + "DATABASES" + '\"]\n' return dot class DropDatabase(Sentence): def __init__(self, name, ifExistsFlag): self.name = name self.ifExistsFlag = ifExistsFlag def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"DropDatabase\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DROP") + hash(self)) + '\n' dot += str(hash("DROP") + hash(self)) + \ '[label=\"' + "DROP" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DATABASE") + 
hash(self)) + '\n' dot += str(hash("DATABASE") + hash(self)) + \ '[label=\"' + "DATABASE" + '\"]\n' if(self.ifExistsFlag): dot += str(hash(self)) + '->' + \ str(hash("IF") + hash(self)) + '\n' dot += str(hash("IF") + hash(self)) + \ '[label=\"' + "IF" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("EXISTS") + hash(self)) + '\n' dot += str(hash("EXISTS") + hash(self)) + \ '[label=\"' + "EXISTS" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' return dot class DropTable(Sentence): def __init__(self, name): self.name = name def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"DropTable\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DROP") + hash(self)) + '\n' dot += str(hash("DROP") + hash(self)) + \ '[label=\"' + "DROP" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' return dot class Use(Sentence): def __init__(self, name): self.name = name def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Use\"]\n' dot += str(hash(self)) + '->' + \ str(hash("USE") + hash(self)) + '\n' dot += str(hash("USE") + hash(self)) + \ '[label=\"' + "USE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' return dot class AlterDatabaseRename(Sentence): def __init__(self, oldname,newname): self.oldname = oldname self.newname = newname def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterDatabaseRename\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DATABASE") + hash(self)) + '\n' dot += str(hash("DATABASE") + hash(self)) + \ '[label=\"' + "DATABASE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.oldname) + hash(self)) + '\n' dot += str(hash(self.oldname) + hash(self)) + \ '[label=\"' + self.oldname + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("RENAME") + hash(self)) + '\n' dot += str(hash("RENAME") + hash(self)) + \ '[label=\"' + "RENAME" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TO") + hash(self)) + '\n' dot += str(hash("TO") + hash(self)) + \ '[label=\"' + "TO" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.newname) + hash(self)) + '\n' dot += str(hash(self.newname) + hash(self)) + \ '[label=\"' + self.newname + '\"]\n' return dot class AlterDatabaseOwner(Sentence): def __init__(self, name, newowner): self.name = name self.newowner = newowner def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterDatabaseOwner\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DATABASE") + hash(self)) + '\n' dot += str(hash("DATABASE") + hash(self)) + \ '[label=\"' + "DATABASE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + 
'\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("OWNER") + hash(self)) + '\n' dot += str(hash("OWNER") + hash(self)) + \ '[label=\"' + "OWNER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TO") + hash(self)) + '\n' dot += str(hash("TO") + hash(self)) + \ '[label=\"' + "TO" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.newowner) + hash(self)) + '\n' dot += str(hash(self.newowner) + hash(self)) + \ '[label=\"' + self.newowner + '\"]\n' return dot class AlterTableDropColumn(Sentence): def __init__(self, table, column): self.table = table self.column = column def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableDropColumn\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DROP") + hash(self)) + '\n' dot += str(hash("DROP") + hash(self)) + \ '[label=\"' + "DROP" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("COLUMN") + hash(self)) + '\n' dot += str(hash("COLUMN") + hash(self)) + \ '[label=\"' + "COLUMN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("C"+self.column) + hash(self)) + '\n' dot += str(hash("C"+self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' return dot class AlterTableAddConstraintUnique(Sentence): def __init__(self, table, constraint, column): self.table = table self.constraint = constraint self.column = column def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableAddConstraintUnique\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ADD") + hash(self)) + '\n' dot += str(hash("ADD") + hash(self)) + \ '[label=\"' + "ADD" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CONSTRAINT") + hash(self)) + '\n' dot += str(hash("CONSTRAINT") + hash(self)) + \ '[label=\"' + "CONSTRAINT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.constraint) + hash(self)) + '\n' dot += str(hash(self.constraint) + hash(self)) + \ '[label=\"' + self.constraint + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("UNIQUE") + hash(self)) + '\n' dot += str(hash("UNIQUE") + hash(self)) + \ '[label=\"' + "UNIQUE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.column) + hash(self)) + '\n' dot += str(hash(self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' return dot class AlterTableAddForeignKey(Sentence): def __init__(self, table, column, rel_table, rel_column): self.table = table self.column = column self.rel_table = rel_table self.rel_column = rel_column def graphAST(self, dot, parent): dot += parent + '->' + 
str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableAddForeignKey\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("T"+self.table) + hash(self)) + '\n' dot += str(hash("T"+self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ADD") + hash(self)) + '\n' dot += str(hash("ADD") + hash(self)) + \ '[label=\"' + "ADD" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("FOREIGN") + hash(self)) + '\n' dot += str(hash("FOREIGN") + hash(self)) + \ '[label=\"' + "FOREIGN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("KEY") + hash(self)) + '\n' dot += str(hash("KEY") + hash(self)) + \ '[label=\"' + "KEY" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("C"+self.column) + hash(self)) + '\n' dot += str(hash("C"+self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("REFERENCES") + hash(self)) + '\n' dot += str(hash("REFERENCES") + hash(self)) + \ '[label=\"' + "REFERENCES" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.rel_table) + hash(self)) + '\n' dot += str(hash(self.rel_table) + hash(self)) + \ '[label=\"' + self.rel_table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("RC"+self.rel_column) + hash(self)) + '\n' dot += str(hash("RC"+self.rel_column) + hash(self)) + \ '[label=\"' + self.rel_column + '\"]\n' return dot class AlterTableAlterColumnSetNull(Sentence): def __init__(self, table, column, null): self.table = table self.column = column self.null = null def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableAlterColumnSetNull\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("T"+self.table) + hash(self)) + '\n' dot += str(hash("T"+self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTERC") + hash(self)) + '\n' dot += str(hash("ALTERC") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("COLUMN") + hash(self)) + '\n' dot += str(hash("COLUMN") + hash(self)) + \ '[label=\"' + "COLUMN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.column) + hash(self)) + '\n' dot += str(hash(self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("SET") + hash(self)) + '\n' dot += str(hash("SET") + hash(self)) + \ '[label=\"' + "SET" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("null") + hash(self)) + '\n' dot += str(hash("null") + hash(self)) + \ '[label=\"' + "null" + '\"]\n' dot += str(hash("null") +hash(self)) + '->' + \ str(hash("null") + hash(str(self.null)) + hash(self)) + '\n' dot += str(hash("null") + hash(str(self.null)) + hash(self)) + \ '[label=\"' + str(self.null) + '\"]\n' return dot class AlterTableAlterColumnType(Sentence): def __init__(self, table, column, newtype): self.table = table self.column = column self.newtype = 
newtype # type [type,length] or type = [type] def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableAlterColumnType\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("T"+self.table) + hash(self)) + '\n' dot += str(hash("T"+self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTERC") + hash(self)) + '\n' dot += str(hash("ALTERC") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("COLUMN") + hash(self)) + '\n' dot += str(hash("COLUMN") + hash(self)) + \ '[label=\"' + "COLUMN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("C"+self.column) + hash(self)) + '\n' dot += str(hash("C"+self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TYPE") + hash(self)) + '\n' dot += str(hash("TYPE") + hash(self)) + \ '[label=\"' + "TYPE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.newtype[0]) + hash(self)) + '\n' dot += str(hash(self.newtype[0]) + hash(self)) + \ '[label=\"' + self.newtype[0] + '\"]\n' return dot class AlterTableAddColumn(Sentence): def __init__(self, table, column, type): self.table = table self.column = column self.type = type # type [type,length] or type = [type] def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableAddColumn\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("T"+self.table) + hash(self)) + '\n' dot += str(hash("T"+self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ADD") + hash(self)) + '\n' dot += str(hash("ADD") + hash(self)) + \ '[label=\"' + "ADD" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("COLUMN") + hash(self)) + '\n' dot += str(hash("COLUMN") + hash(self)) + \ '[label=\"' + "COLUMN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("C"+self.column) + hash(self)) + '\n' dot += str(hash("C"+self.column) + hash(self)) + \ '[label=\"' + self.column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.type[0]) + hash(self)) + '\n' dot += str(hash(self.type[0]) + hash(self)) + \ '[label=\"' + self.type[0] + '\"]\n' return dot class AlterTableDropConstraint(Sentence): def __init__(self, table, constraint): self.table = table self.constraint = constraint def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"AlterTableDropConstraint\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ALTER") + hash(self)) + '\n' dot += str(hash("ALTER") + hash(self)) + \ '[label=\"' + "ALTER" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("T"+self.table) + hash(self)) + '\n' dot += str(hash("T"+self.table) + hash(self)) 
+ \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DROP") + hash(self)) + '\n' dot += str(hash("DROP") + hash(self)) + \ '[label=\"' + "DROP" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CONSTRAINT") + hash(self)) + '\n' dot += str(hash("CONSTRAINT") + hash(self)) + \ '[label=\"' + "CONSTRAINT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("C"+self.constraint) + hash(self)) + '\n' dot += str(hash("C"+self.constraint) + hash(self)) + \ '[label=\"' + self.constraint + '\"]\n' return dot class Insert(Sentence): def __init__(self, table, columns, values): self.table = table self.columns = columns self.values = values def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Insert\"]\n' dot += str(hash(self)) + '->' + \ str(hash("INSERT") + hash(self)) + '\n' dot += str(hash("INSERT") + hash(self)) + \ '[label=\"' + "INSERT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("INTO") + hash(self)) + '\n' dot += str(hash("INTO") + hash(self)) + \ '[label=\"' + "INTO" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Columns") + hash(self)) + '\n' dot += str(hash("Columns") + hash(self)) + \ '[label=\"' + "Columns" + '\"]\n' for column in self.columns: dot += str(hash("Columns") + hash(self)) + '->' + \ str(hash("Columns") + hash(self) + hash(column)) + '\n' dot += str(hash("Columns") + hash(self)+hash(column)) + \ '[label=\"' + column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("VALUES") + hash(self)) + '\n' dot += str(hash("VALUES") + hash(self)) + \ '[label=\"' + "VALUES" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Values") + hash(self)) + '\n' dot += str(hash("Values") + hash(self)) + \ '[label=\"' + "Values" + '\"]\n' for value in self.values: dot+= value.graphAST('',str(hash("Values") + hash(self))) return dot class InsertAll(Sentence): def __init__(self, table, values): self.table = table self.values = values def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"InsertAll\"]\n' dot += str(hash(self)) + '->' + \ str(hash("INSERT") + hash(self)) + '\n' dot += str(hash("INSERT") + hash(self)) + \ '[label=\"' + "INSERT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("INTO") + hash(self)) + '\n' dot += str(hash("INTO") + hash(self)) + \ '[label=\"' + "INTO" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("VALUES") + hash(self)) + '\n' dot += str(hash("VALUES") + hash(self)) + \ '[label=\"' + "VALUES" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Values") + hash(self)) + '\n' dot += str(hash("Values") + hash(self)) + \ '[label=\"' + "Values" + '\"]\n' for value in self.values: dot+= value.graphAST('',str(hash("Values") + hash(self))) return dot class Delete(Sentence): def __init__(self, table, expression): self.table = table self.expression = expression def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Delete\"]\n' dot += str(hash(self)) + '->' + \ str(hash("DELETE") + hash(self)) + '\n' dot += str(hash("DELETE") + hash(self)) + \ '[label=\"' + "DELETE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("FROM") + 
hash(self)) + '\n' dot += str(hash("FROM") + hash(self)) + \ '[label=\"' + "FROM" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("WHERE") + hash(self)) + '\n' dot += str(hash("WHERE") + hash(self)) + \ '[label=\"' + "WHERE" + '\"]\n' dot += self.expression.graphAST('',str(hash("WHERE") + hash(self))) return dot class Truncate(Sentence): def __init__(self, tables): self.tables = tables def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Truncate\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TRUNCATE") + hash(self)) + '\n' dot += str(hash("TRUNCATE") + hash(self)) + \ '[label=\"' + "TRUNCATE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Tables") + hash(self)) + '\n' dot += str(hash("Tables") + hash(self)) + \ '[label=\"' + "Tables" + '\"]\n' for table in self.tables: dot += str(hash("Tables") + hash(self)) + '->' + \ str(hash("Tables") + hash(self) + hash(table)) + '\n' dot += str(hash("Tables") + hash(self) + hash(table)) + \ '[label=\"' + table + '\"]\n' return dot class Update(Sentence): def __init__(self, table, values, expression): self.table = table self.values = values #values = [value1,value2,...,valuen] -> value = [id,expression] self.expression = expression def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Update\"]\n' dot += str(hash(self)) + '->' + \ str(hash("UPDATE") + hash(self)) + '\n' dot += str(hash("UPDATE") + hash(self)) + \ '[label=\"' + "UPDATE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("SET") + hash(self)) + '\n' dot += str(hash("SET") + hash(self)) + \ '[label=\"' + "SET" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Values") + hash(self)) + '\n' dot += str(hash("Values") + hash(self)) + \ '[label=\"' + "Values" + '\"]\n' for value in self.values: dot += str(hash("Values") + hash(self)) + '->' + \ str(hash("Values") + hash(self) + hash(value[0])) + '\n' dot += str(hash("Values") + hash(self) + hash(value[0])) + \ '[label=\"' + value[0] + '\"]\n' dot+= value[1].graphAST('',str(hash("Values") + hash(self))) dot += str(hash(self)) + '->' + \ str(hash("WHERE") + hash(self)) + '\n' dot += str(hash("WHERE") + hash(self)) + \ '[label=\"' + "WHERE" + '\"]\n' dot += self.expression.graphAST('',str(hash("WHERE") + hash(self))) return dot class CreateType(Sentence): def __init__(self, name, expressions): self.name = name self.expressions = expressions #expressions = [expression1,expression2,...,expressionn] def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"CreateType\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CREATE") + hash(self)) + '\n' dot += str(hash("CREATE") + hash(self)) + \ '[label=\"' + "CREATE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TYPE") + hash(self)) + '\n' dot += str(hash("TYPE") + hash(self)) + \ '[label=\"' + "TYPE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("AS") + hash(self)) + '\n' dot += str(hash("AS") + hash(self)) + \ '[label=\"' + "AS" + 
'\"]\n' dot += str(hash(self)) + '->' + \ str(hash("ENUM") + hash(self)) + '\n' dot += str(hash("ENUM") + hash(self)) + \ '[label=\"' + "ENUM" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("expressions") + hash(self)) + '\n' dot += str(hash("expressions") + hash(self)) + \ '[label=\"' + "expressions" + '\"]\n' for expression in self.expressions: dot+= expression.graphAST('',str(hash("expressions") + hash(self))) return dot class CreateTable(Sentence): def __init__(self, name, columns, inherits): self.name = name self.columns = columns #columns = [column1,column2,...,columnn] Every Column is an instance of {'id','check','constraint','unique','primary','foreign'} self.inherits = inherits #Types: #column -> {ColumnId,ColumnCheck,ColumnConstraint,ColumnUnique,ColumnPrimaryKey,ColumnForeignKey} def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"CreateTable\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CREATE") + hash(self)) + '\n' dot += str(hash("CREATE") + hash(self)) + \ '[label=\"' + "CREATE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("TABLE") + hash(self)) + '\n' dot += str(hash("TABLE") + hash(self)) + \ '[label=\"' + "TABLE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("columns") + hash(self)) + '\n' dot += str(hash("columns") + hash(self)) + \ '[label=\"' + "columns" + '\"]\n' for column in self.columns: dot+= column.graphAST('',str(hash("columns") + hash(self))) return dot class Select(Sentence): def __init__(self, columns, distinct, tables, options): self.columns = columns self.distinct = distinct self.tables = tables self.options = options # options = {'where','orderby','limit','offset','groupby','having'} or None # options se puede acceder a los items de la forma options['nombrepropiedad'] si no existe devuelve 'nombrepropiedad' # where -> Expression # orderby -> SortExpressionList # sortExpressionList -> lista de expresiones de la forma [Expression,ASC/DESC] # limit -> Expression/ALL ALL is the same as omitting the LIMIT clause # offset -> Expression OFFSET says to skip that many rows before beginning to return rows. OFFSET 0 is the same as omitting the OFFSET clause. # If both OFFSET and LIMIT appear, then OFFSET rows are skipped before starting to count the LIMIT rows that are returned. 
# groupby -> ExpressionList # having -> Expression def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"Select\"]\n' dot += str(hash(self)) + '->' + \ str(hash("SELECT") + hash(self)) + '\n' dot += str(hash("SELECT") + hash(self)) + \ '[label=\"' + "SELECT" + '\"]\n' if(self.distinct): dot += str(hash(self)) + '->' + \ str(hash("DISTINCT") + hash(self)) + '\n' dot += str(hash("DISTINCT") + hash(self)) + \ '[label=\"' + "DISTINCT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("Columns") + hash(self)) + '\n' dot += str(hash("Columns") + hash(self)) + \ '[label=\"' + "Columns" + '\"]\n' for column in self.columns: dot += column.graphAST('',str(hash("Columns") + hash(self))) if(self.tables != None): dot += str(hash(self)) + '->' + \ str(hash("Tables") + hash(self)) + '\n' dot += str(hash("Tables") + hash(self)) + \ '[label=\"' + "Tables" + '\"]\n' for table in self.tables: dot += table.graphAST('',str(hash("Tables") + hash(self))) if (bool(self.options)): dot += str(hash(self)) + '->' + \ str(hash("Options") + hash(self)) + '\n' dot += str(hash("Options") + hash(self)) + \ '[label=\"' + "Options" + '\"]\n' try: self.options['where'] dot += str(hash("Options") +hash(self)) + '->' + \ str(hash("WhereClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("WhereClause") + hash("Options") +hash(self)) + \ '[label=\"' + "WhereClause" + '\"]\n' dot += str(hash("WhereClause") + hash("Options") +hash(self)) + '->' + \ str(hash("WHERE")+hash("WhereClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("WHERE")+hash("WhereClause") + hash("Options") +hash(self)) + \ '[label=\"' + "WHERE" + '\"]\n' dot += self.options['where'].graphAST('',str(hash("WhereClause") + hash("Options") +hash(self))) except: pass try: self.options['limit'] dot += str(hash("Options") +hash(self)) + '->' + \ str(hash("LimitClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("LimitClause") + hash("Options") +hash(self)) + \ '[label=\"' + "LimitClause" + '\"]\n' dot += str(hash("LimitClause") + hash("Options") +hash(self)) + '->' + \ str(hash("LIMIT")+hash("LimitClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("LIMIT")+hash("LimitClause") + hash("Options") +hash(self)) + \ '[label=\"' + "LIMIT" + '\"]\n' if(self.options['limit']!='ALL'): dot += self.options['limit'].graphAST('',str(hash("LimitClause") + hash("Options") +hash(self))) else: dot += str(hash("LimitClause") + hash("Options") +hash(self)) + '->' + \ str(hash("ALL")+hash("LimitClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("ALL")+hash("LimitClause") + hash("Options") +hash(self)) + \ '[label=\"' + "ALL" + '\"]\n' except: pass try: self.options['offset'] dot += str(hash("Options") +hash(self)) + '->' + \ str(hash("OffsetClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("OffsetClause") + hash("Options") +hash(self)) + \ '[label=\"' + "OffsetClause" + '\"]\n' dot += str(hash("OffsetClause") + hash("Options") +hash(self)) + '->' + \ str(hash("OFFSET")+hash("OffsetClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("OFFSET")+hash("OffsetClause") + hash("Options") +hash(self)) + \ '[label=\"' + "OFFSET" + '\"]\n' dot += self.options['offset'].graphAST('',str(hash("OffsetClause") + hash("Options") +hash(self))) except: pass try: self.options['orderby'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("OrderByClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("OrderByClause") + hash("Options") +hash(self)) + \ 
'[label=\"' + "OrderByClause" + '\"]\n' dot += str(hash("OrderByClause") + hash("Options") +hash(self)) + '->' + \ str(hash("ORDER")+hash("OrderByClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("ORDER")+hash("OrderByClause") + hash("Options") +hash(self)) + \ '[label=\"' + "ORDER" + '\"]\n' dot += str(hash("OrderByClause") + hash("Options") +hash(self)) + '->' + \ str(hash("BY")+hash("OrderByClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("BY")+hash("OrderByClause") + hash("Options") +hash(self)) + \ '[label=\"' + "BY" + '\"]\n' for sortexpression in self.options['orderby']: dot += sortexpression[0].graphAST('',str(hash("OrderByClause") + hash("Options") +hash(self))) dot += str(hash("OrderByClause") + hash("Options") +hash(self)) + '->' + \ str(hash(sortexpression[0])+hash(sortexpression[1])+hash("OrderByClause") + hash(self)) + '\n' dot += str(hash(sortexpression[0])+hash(sortexpression[1])+hash("OrderByClause") + hash(self)) + \ '[label=\"' + sortexpression[1] + '\"]\n' except: pass try: self.options['groupby'] dot += str(hash("Options") +hash(self)) + '->' + \ str(hash("GroupbyClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("GroupbyClause") + hash("Options") +hash(self)) + \ '[label=\"' + "GroupbyClause" + '\"]\n' dot += str(hash("GroupbyClause") + hash("Options") +hash(self)) + '->' + \ str(hash("GROUP")+hash("GroupbyClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("GROUP")+hash("GroupbyClause") + hash("Options") +hash(self)) + \ '[label=\"' + "GROUP" + '\"]\n' dot += str(hash("GroupbyClause") + hash("Options") +hash(self)) + '->' + \ str(hash("BY")+hash("GroupbyClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("BY")+hash("GroupbyClause") + hash("Options") +hash(self)) + \ '[label=\"' + "BY" + '\"]\n' for expression in self.options['groupby']: dot += expression.graphAST('',str(hash("GroupbyClause") + hash("Options") +hash(self))) except: pass try: self.options['having'] dot += str(hash("Options") +hash(self)) + '->' + \ str(hash("HavingClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("HavingClause") + hash("Options") +hash(self)) + \ '[label=\"' + "HavingClause" + '\"]\n' dot += str(hash("HavingClause") + hash("Options") +hash(self)) + '->' + \ str(hash("HAVING")+hash("HavingClause") + hash("Options") +hash(self)) + '\n' dot += str(hash("HAVING")+hash("HavingClause") + hash("Options") +hash(self)) + \ '[label=\"' + "HAVING" + '\"]\n' dot += self.options['having'].graphAST('',str(hash("HavingClause") + hash("Options") +hash(self))) except: pass return dot class SelectMultiple(Sentence): def __init__(self, select1, operator, select2): self.select1 = select1 self.operator = operator self.select2 = select2 def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"'+self.operator+'\"]\n' dot += self.select1.graphAST('',str(hash(self))) dot += self.select2.graphAST('',str(hash(self))) return dot class CreateTableOpt: ''' ''' class ColumnId(CreateTableOpt): def __init__(self, name, type, options): self.name = name self.type = type self.options = options #options = {'default','null','primary','reference','unique','constraint','check'} # options se puede acceder a los items de la forma options['nombrepropiedad'] si no existe devuelve 'nombrepropiedad' # default -> Expression # null -> True/False # primary -> True # reference -> ID # unique -> True # constraintunique -> ID # check -> Expression # constraintcheck -> ID,Expression def graphAST(self, dot, 
parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnId\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.type[0]) + hash(self)) + '\n' dot += str(hash(self.type[0]) + hash(self)) + \ '[label=\"' + self.type[0] + '\"]\n' if(bool(self.options)): dot += str(hash(self)) + '->' + \ str(hash("Options") + hash(self)) + '\n' dot += str(hash("Options") + hash(self)) + \ '[label=\"' + "Options" + '\"]\n' try: self.options['default'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("default")) + '\n' dot += str(hash("Options") + hash(self)+ hash("default")) + \ '[label=\"' + "default" + '\"]\n' dot += self.options['default'].graphAST('',str(hash("Options") + hash(self)+ hash("default"))) except: pass try: self.options['null'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("null")) + '\n' dot += str(hash("Options") + hash(self)+ hash("null")) + \ '[label=\"' + "null" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("null")) + '->' + \ str(hash("Options") + hash(self) + hash("null")+ hash(str(self.options['null']))) + '\n' dot += str(hash("Options") + hash(self)+ hash("null") + hash(str(self.options['null']))) + \ '[label=\"' + str(self.options['null']) + '\"]\n' except: pass try: self.options['primary'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("primarykey")) + '\n' dot += str(hash("Options") + hash(self)+ hash("primarykey")) + \ '[label=\"' + "primarykey" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("primarykey")) + '->' + \ str(hash("Options") + hash(self) + hash("primarykey")+ hash(str(self.options['primary']))) + '\n' dot += str(hash("Options") + hash(self)+ hash("primarykey") + hash(str(self.options['primary']))) + \ '[label=\"' + str(self.options['primary']) + '\"]\n' except: pass try: self.options['reference'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("reference")) + '\n' dot += str(hash("Options") + hash(self)+ hash("reference")) + \ '[label=\"' + "reference" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("reference")) + '->' + \ str(hash("Options") + hash(self) + hash("reference")+ hash(str(self.options['reference']))) + '\n' dot += str(hash("Options") + hash(self)+ hash("reference") + hash(str(self.options['reference']))) + \ '[label=\"' + str(self.options['reference']) + '\"]\n' except: pass try: self.options['unique'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("unique")) + '\n' dot += str(hash("Options") + hash(self)+ hash("unique")) + \ '[label=\"' + "unique" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("unique")) + '->' + \ str(hash("Options") + hash(self) + hash("unique")+ hash(str(self.options['unique']))) + '\n' dot += str(hash("Options") + hash(self)+ hash("unique") + hash(str(self.options['unique']))) + \ '[label=\"' + str(self.options['unique']) + '\"]\n' except: pass try: self.options['constraintunique'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("constraintunique")) + '\n' dot += str(hash("Options") + hash(self)+ hash("constraintunique")) + \ '[label=\"' + "constraintunique" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("constraintunique")) + '->' + \ 
str(hash("Options") + hash(self) + hash("constraintunique")+ hash(str(self.options['constraintunique']))) + '\n' dot += str(hash("Options") + hash(self)+ hash("constraintunique") + hash(str(self.options['constraintunique']))) + \ '[label=\"' + str(self.options['constraintunique']) + '\"]\n' except: pass try: self.options['check'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("check")) + '\n' dot += str(hash("Options") + hash(self)+ hash("check")) + \ '[label=\"' + "check" + '\"]\n' dot += self.options['check'].graphAST('',str(hash("Options") + hash(self)+ hash("check"))) except: pass try: self.options['constraintcheck'] dot += str(hash("Options") + hash(self)) + '->' + \ str(hash("Options") + hash(self) + hash("constraintcheck")) + '\n' dot += str(hash("Options") + hash(self)+ hash("constraintcheck")) + \ '[label=\"' + "constraintcheck" + '\"]\n' dot += str(hash("Options") + hash(self)+ hash("constraintcheck")) + '->' + \ str(hash("Options") + hash(self) + hash("constraintcheck")+ hash(str(self.options['constraintcheck'][0]))) + '\n' dot += str(hash("Options") + hash(self)+ hash("constraintcheck") + hash(str(self.options['constraintcheck'][0]))) + \ '[label=\"' + str(self.options['constraintunique'][0]) + '\"]\n' dot += self.options['constraintcheck'][1].graphAST('',str(hash("Options") + hash(self)+ hash("constraintcheck"))) except: pass return dot class ColumnCheck(CreateTableOpt): def __init__(self, expression): self.expression = expression def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnCheck\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CHECK") + hash(self)) + '\n' dot += str(hash("CHECK") + hash(self)) + \ '[label=\"' + "CHECK" + '\"]\n' dot += self.expression.graphAST('',str(hash(self))) return dot class ColumnConstraint(CreateTableOpt): def __init__(self, name,expression): self.name = name self.expression = expression def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnConstraint\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CONSTRAINT") + hash(self)) + '\n' dot += str(hash("CONSTRAINT") + hash(self)) + \ '[label=\"' + "CONSTRAINT" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.name) + hash(self)) + '\n' dot += str(hash(self.name) + hash(self)) + \ '[label=\"' + self.name + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("CHECK") + hash(self)) + '\n' dot += str(hash("CHECK") + hash(self)) + \ '[label=\"' + "CHECK" + '\"]\n' dot += self.expression.graphAST('',str(hash(self))) return dot class ColumnUnique(CreateTableOpt): def __init__(self, columnslist): self.columnslist = columnslist # is and idList [columnname1,columnname2,...,columnnamen] def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnUnique\"]\n' dot += str(hash(self)) + '->' + \ str(hash("UNIQUE") + hash(self)) + '\n' dot += str(hash("UNIQUE") + hash(self)) + \ '[label=\"' + "UNIQUE" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("idList") + hash(self)) + '\n' dot += str(hash("idList") + hash(self)) + \ '[label=\"' + "idList" + '\"]\n' for column in self.columnslist: dot += str(hash("idList") + hash(self)) + '->' + \ str(hash("idList") + hash(self) + hash(column)) + '\n' dot += str(hash("idList") + hash(self) + hash(column)) + \ '[label=\"' + column + '\"]\n' return dot class ColumnPrimaryKey(CreateTableOpt): def __init__(self, columnslist): 
self.columnslist = columnslist # is and idList [columnname1,columnname2,...,columnnamen] def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnPrimaryKey\"]\n' dot += str(hash(self)) + '->' + \ str(hash("PRIMARY") + hash(self)) + '\n' dot += str(hash("PRIMARY") + hash(self)) + \ '[label=\"' + "PRIMARY" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("KEY") + hash(self)) + '\n' dot += str(hash("KEY") + hash(self)) + \ '[label=\"' + "KEY" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("idList") + hash(self)) + '\n' dot += str(hash("idList") + hash(self)) + \ '[label=\"' + "idList" + '\"]\n' for column in self.columnslist: dot += str(hash("idList") + hash(self)) + '->' + \ str(hash("idList") + hash(self) + hash(column)) + '\n' dot += str(hash("idList") + hash(self) + hash(column)) + \ '[label=\"' + column + '\"]\n' return dot class ColumnForeignKey(CreateTableOpt): def __init__(self, columnslist, table, columnslist_ref): self.columnslist = columnslist # is and idList [columnname1,columnname2,...,columnnamen] self.table = table self.columnslist_ref = columnslist_ref # is and idList [refcolumnname1,refcolumnname2,...,refcolumnname def graphAST(self, dot, parent): dot += parent + '->' + str(hash(self)) + '\n' dot += str(hash(self)) + '[label=\"ColumnForeignKey\"]\n' dot += str(hash(self)) + '->' + \ str(hash("FOREIGN") + hash(self)) + '\n' dot += str(hash("FOREIGN") + hash(self)) + \ '[label=\"' + "FOREIGN" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("KEY") + hash(self)) + '\n' dot += str(hash("KEY") + hash(self)) + \ '[label=\"' + "KEY" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("idList") + hash(self)) + '\n' dot += str(hash("idList") + hash(self)) + \ '[label=\"' + "idList" + '\"]\n' for column in self.columnslist: dot += str(hash("idList") + hash(self)) + '->' + \ str(hash("idList") + hash(self) + hash(column)) + '\n' dot += str(hash("idList") + hash(self) + hash(column)) + \ '[label=\"' + column + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("REFERENCES") + hash(self)) + '\n' dot += str(hash("REFERENCES") + hash(self)) + \ '[label=\"' + "REFERENCES" + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash(self.table) + hash(self)) + '\n' dot += str(hash(self.table) + hash(self)) + \ '[label=\"' + self.table + '\"]\n' dot += str(hash(self)) + '->' + \ str(hash("idRefList") + hash(self)) + '\n' dot += str(hash("idRefList") + hash(self)) + \ '[label=\"' + "idList" + '\"]\n' for column in self.columnslist_ref: dot += str(hash("idRefList") + hash(self)) + '->' + \ str(hash("idRefList") + hash(self) + hash(column)) + '\n' dot += str(hash("idRefList") + hash(self) + hash(column)) + \ '[label=\"' + column + '\"]\n' return dot
49.488455
159
0.448048
acedc576f21e4207a3e6701077de7a01fa6b449a
1,899
py
Python
tests/parse/test_parse_clnsig.py
ninanorgren/scout
1b2f110aac7bd66d331e3413f13bbda93af19127
[ "BSD-3-Clause" ]
1
2019-08-17T21:20:04.000Z
2019-08-17T21:20:04.000Z
tests/parse/test_parse_clnsig.py
ninanorgren/scout
1b2f110aac7bd66d331e3413f13bbda93af19127
[ "BSD-3-Clause" ]
null
null
null
tests/parse/test_parse_clnsig.py
ninanorgren/scout
1b2f110aac7bd66d331e3413f13bbda93af19127
[ "BSD-3-Clause" ]
null
null
null
from scout.parse.variant.clnsig import parse_clnsig


def test_parse_clnsig_():
    ## Test parsing classical clnsig representation
    variant = {
        'info_dict':{
            'CLNACC': "RCV000014440.17|RCV000014441.25|RCV000014442.25|RCV000014443.17|RCV000184011.1|RCV000188658.1",
            'CLNSIG': "5|5|5|5|5|5",
            'CLNREVSTAT': "conf|single|single|single|conf|conf",
        }
    }

    ## WHEN parsing the clinical significance
    clnsig_annotations = parse_clnsig(
        acc=variant['info_dict']['CLNACC'],
        sig=variant['info_dict']['CLNSIG'],
        revstat=variant['info_dict']['CLNREVSTAT'],
        transcripts=[]
    )

    ## THEN assert that they where parsed correct
    assert len(clnsig_annotations) == 6

    for entry in clnsig_annotations:
        if entry['accession'] == "RCV000014440.17":
            assert entry['value'] == 5
            assert entry['revstat'] == 'conf'

    ## Test parsing clnsig combination of values from different submitters:
    variant = {
        'info_dict':{
            'CLNACC': "265359",
            'CLNSIG': "Pathogenic/Likely pathogenic",
            'CLNREVSTAT': "criteria_provided,_multiple_submitters,_no_conflicts",
        }
    }

    clinrevstat = variant['info_dict']['CLNREVSTAT']
    revstat_groups = [rev.lstrip('_') for rev in clinrevstat.split(',')]

    clnsig_annotations = parse_clnsig(
        acc=variant['info_dict']['CLNACC'],
        sig=variant['info_dict']['CLNSIG'],
        revstat=variant['info_dict']['CLNREVSTAT'],
        transcripts=[]
    )

    ## assert that they where parsed correct
    assert len(clnsig_annotations) == 2

    for entry in clnsig_annotations:
        assert entry['accession'] == int(variant['info_dict']['CLNACC'])
        assert entry['value'] in ['Pathogenic', 'Likely pathogenic']
        assert entry['revstat'] == ', '.join(revstat_groups)
33.910714
118
0.622433
acedc5ab234cfc57d82e50eda133e632d4c5fc2c
5,154
py
Python
python/sfan/__init__.py
SequentMicrosystems/SmartFan-rpi
61ba1ec352f448d145735663480d9ffff56802e1
[ "MIT" ]
15
2021-02-14T00:14:24.000Z
2022-03-26T20:28:49.000Z
python/sfan/__init__.py
SequentMicrosystems/SmartFan-rpi
61ba1ec352f448d145735663480d9ffff56802e1
[ "MIT" ]
5
2021-04-14T01:29:04.000Z
2021-05-16T13:02:27.000Z
python/sfan/__init__.py
SequentMicrosystems/SmartFan-rpi
61ba1ec352f448d145735663480d9ffff56802e1
[ "MIT" ]
4
2021-05-16T00:37:28.000Z
2022-03-18T16:07:40.000Z
import smbus import struct import RPi.GPIO as GPIO # bus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1) DEVICE_ADDRESS = 0x03 # 7 bit address (will be left shifted to add the read write bit) DEVICE_ALT_ADDRESS = 0x2C def checkCard(stack): if stack < 0 or stack > 1: raise ValueError('Invalid stack level [0..1]') bus = smbus.SMBus(1) I2C_MEM_REVISION_HW_MAJOR_ADD = 100 address = DEVICE_ADDRESS + stack try: val = bus.read_byte_data(address, I2C_MEM_REVISION_HW_MAJOR_ADD) except: address = DEVICE_ALT_ADDRESS + (stack ^ 1) try: val = bus.read_byte(address) except Exception as e: print(e) address = -1 bus.close() return address def setPower(stack, power): if power < 0 or power > 100: raise ValueError('Invalid power value [0..100]') POWER_ADDRESS = 0 address = checkCard(stack) if address < 0: return -1 if address > 4: val = 255 - power * 2.55 power = int(val) GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(12, GPIO.OUT) if power < 255: GPIO.output(12, GPIO.HIGH) else: GPIO.output(12, GPIO.LOW) try: bus = smbus.SMBus(1) bus.write_byte_data(address, POWER_ADDRESS, power) except Exception as e: print(e) bus.close() return -1 bus.close() return 1 def getPower(stack): address = checkCard(stack) if address < 0: return -1 POWER_ADDRESS = 0 try: bus = smbus.SMBus(1) if address > 4: val = bus.read_byte(address) else: val = bus.read_byte_data(address, POWER_ADDRESS) except Exception as e: print(e) bus.close() return -1 bus.close() if address > 4: val = int((255 - val) / 2.55) return val def setSafetyTemp(stack, temp): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') if temp < 30 or temp > 80: raise ValueError('Invalid safety temperature value [30..80]') SAFETY_TEMP_ADDRESS = 3 try: bus = smbus.SMBus(1) bus.write_byte_data(address, SAFETY_TEMP_ADDRESS, temp) except Exception as e: print(e) bus.close() return -1 bus.close() return 1 def getSafetyTemp(stack): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') SAFETY_TEMP_ADDRESS = 3 try: bus = smbus.SMBus(1) val = bus.read_byte_data(address, SAFETY_TEMP_ADDRESS); except Exception as e: print(e) bus.close() return -1 bus.close() return val def getProcTemp(stack): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') TEMP_ADDRESS = 2 try: bus = smbus.SMBus(1) val = bus.read_byte_data(address, TEMP_ADDRESS); except Exception as e: print(e) bus.close() return -1 bus.close() return val def setLedBlink(stack, blink): val = 0 address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') if blink == 0: val = 1 BLINK_ADDRESS = 1 try: bus = smbus.SMBus(1) bus.write_byte_data(address, BLINK_ADDRESS, val) except Exception as e: print(e) bus.close() return -1 bus.close() return 1 def getLedBlink(stack): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') BLINK_ADDRESS = 1 try: bus = smbus.SMBus(1) val = bus.read_byte_data(address, BLINK_ADDRESS); if val == 0: val = 1 else: val = 0 except Exception as e: print(e) bus.close() return -1 bus.close() return val def setStopInt(stack, seconds): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') I2C_MEM_TIME_TO_STOP_SET = 5 try: bus = 
smbus.SMBus(1) bus.write_word_data(address, I2C_MEM_TIME_TO_STOP_SET, int(seconds)) except Exception as e: print(e) bus.close() return -1 bus.close() return 1 def getStopInt(stack): address = checkCard(stack) if address < 0: return -1 if address > 4: raise ValueError('Not available for this hardware version') I2C_MEM_TIME_TO_STOP_REM = 7 try: bus = smbus.SMBus(1) val = bus.read_word_data(address, I2C_MEM_TIME_TO_STOP_REM) except Exception as e: print(e) bus.close() return -1 bus.close() return val
24.311321
87
0.582848
acedc5b335cb531b64443b8cdff8479b31463b50
139
py
Python
lauztat/hypotests/__init__.py
marinang/statrise
c1b3e1bb1adca42ba0171a6066c5be9d4ff82e04
[ "BSD-3-Clause" ]
19
2019-03-21T18:58:55.000Z
2021-07-27T09:41:36.000Z
lauztat/hypotests/__init__.py
marinang/statrise
c1b3e1bb1adca42ba0171a6066c5be9d4ff82e04
[ "BSD-3-Clause" ]
null
null
null
lauztat/hypotests/__init__.py
marinang/statrise
c1b3e1bb1adca42ba0171a6066c5be9d4ff82e04
[ "BSD-3-Clause" ]
2
2019-03-13T13:03:27.000Z
2020-03-19T05:39:10.000Z
#!/usr/bin/python

from .discovery import Discovery
from .upperlimit import UpperLimit
from .confidence_interval import ConfidenceInterval
23.166667
51
0.841727
acedc5be7e0aa105327d2336c6deffbb74d7fd34
818
py
Python
2020/Python/day01/part2.py
tymscar/Advent-Of-Code
cd7b96b0253191e236bd704b0d8b5540fb3e8ef6
[ "MIT" ]
4
2019-12-08T08:20:53.000Z
2021-12-17T12:04:11.000Z
2020/Python/day01/part2.py
tymscar/AdventOfCode2018
9742ddb6bbbc917062baad87d6b6de75375f1ae8
[ "MIT" ]
null
null
null
2020/Python/day01/part2.py
tymscar/AdventOfCode2018
9742ddb6bbbc917062baad87d6b6de75375f1ae8
[ "MIT" ]
4
2020-12-11T22:10:24.000Z
2021-12-25T22:39:05.000Z
def product_of_n_numbers_that_sum_to(list_of_numbers, value_they_have_to_sum_to, how_many_numbers_to_sum):
    if how_many_numbers_to_sum == 0:
        return 1

    if how_many_numbers_to_sum == 1:
        if value_they_have_to_sum_to in list_of_numbers:
            return value_they_have_to_sum_to
        else:
            return 0

    for number in list_of_numbers:
        complement = value_they_have_to_sum_to - number
        product = product_of_n_numbers_that_sum_to(list_of_numbers, complement, how_many_numbers_to_sum - 1)
        if product > 0:
            return number * product

    return 0


def part_2():
    file = open('input.txt', 'r')

    expenses = {}

    for line in file:
        expenses[int(line)] = True

    return product_of_n_numbers_that_sum_to(expenses, 2020, 3)


print(part_2())
27.266667
108
0.687042
acedc5c73ed2bd4fb415f07223d6a4118c34f5a2
71
py
Python
psy/fa/__init__.py
cegfdb/IRT
20fcde3b385bce1644fecab7cdc8bda5beacda03
[ "MIT" ]
169
2017-08-29T01:35:49.000Z
2022-03-01T05:03:02.000Z
psy/fa/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
8
2017-12-05T05:20:35.000Z
2021-10-03T05:40:45.000Z
psy/fa/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
67
2017-09-01T04:18:54.000Z
2022-02-24T08:21:18.000Z
from psy.fa.rotations import GPForth
from psy.fa.factors import Factor
23.666667
36
0.830986
acedc67c47ccb22b4b743df903a8b0cdbc44460f
7,501
py
Python
src/m3_graphical_accumulating.py
mossac/04-TheAccumulatorPattern
908d07ed9b1fe85bc57d5da39a9e9d23ddb70b99
[ "MIT" ]
null
null
null
src/m3_graphical_accumulating.py
mossac/04-TheAccumulatorPattern
908d07ed9b1fe85bc57d5da39a9e9d23ddb70b99
[ "MIT" ]
null
null
null
src/m3_graphical_accumulating.py
mossac/04-TheAccumulatorPattern
908d07ed9b1fe85bc57d5da39a9e9d23ddb70b99
[ "MIT" ]
null
null
null
""" This module lets you practice one form of the ACCUMULATOR pattern, namely, the "IN GRAPHICS" form which features: -- DRAWING OBJECTS via ACCUMULATING positions and/or sizes, as in: x = x + pixels Additionally, it emphasizes that you must ** DO A CONCRETE EXAMPLE BY HAND ** before you can implement a solution to the problem in Python. Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays, Aaron Wilkin, their colleagues, and Aidan Moss. """ # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. import rosegraphics as rg # ----------------------------------------------------------------------------- # Students: As you work each of these problems, ask yourself: # 1. Do I need a loop? # If so, HOW MANY LOOPS? # # 2. Where I need a loop, what needs to happen: # -- BEFORE the loop? # -- IN the loop? # -- AFTER the loop? # ----------------------------------------------------------------------------- def main(): """ Calls the TEST functions in this module. """ run_test_draw_parallel_lines() run_test_draw_lines() def run_test_draw_parallel_lines(): """ Tests the draw_parallel_lines function. """ print() print('--------------------------------------------------') print('Testing the draw_parallel_lines function:') print(' See the graphics windows that pop up.') print('--------------------------------------------------') # ------------------------------------------------------------------------- # TWO tests on ONE window. # ------------------------------------------------------------------------- title = 'Tests 1 and 2 of DRAW_PARALLEL_LINES:' title = title + ' 4 long lines, 7 short lines' window1 = rg.RoseWindow(600, 350, title) # Test 1: left_most_point = rg.Point(400, 50) draw_parallel_lines(7, left_most_point, 100, window1) # Test 2: left_most_point = rg.Point(50, 200) draw_parallel_lines(4, left_most_point, 300, window1) window1.close_on_mouse_click() # ------------------------------------------------------------------------- # A third test on ANOTHER window. # ------------------------------------------------------------------------- title = 'Test 3 of DRAW_PARALLEL_LINES: 12 very long lines!' window2 = rg.RoseWindow(500, 400, title) # Test 3: left_most_point = rg.Point(20, 20) draw_parallel_lines(12, left_most_point, 470, window2) window2.close_on_mouse_click() def draw_parallel_lines(n, point, length, window): y = point.y x = point.x for k in range(n+1): end = rg.Point(x + length, y) line=rg.Line(rg.Point(x,y),end) y = y+30 line.attach_to(window) window.render() """ What comes in: The four arguments are: -- A positive integer n. -- An rg.Point. -- A positive integer length. -- An rg.RoseWindow. What goes out: Nothing (i.e., None). Side effects: See draw_parallel_lines.pdf in this project for pictures that may help you better understand the following specification: Draws n rg.Lines parallel to each other, all on the given rg.RoseWindow, such that: -- The first rg.Line has its left-most end at the given rg.Point. -- Each rg.Line is a horizontal line (i.e., parallel to the x-axis). -- Each rg.Line has the given length. -- Each rg.Line is 30 pixels below the previous rg.Line. Must ** render ** but ** NOT close ** the window. Type hints: :type n: int :type point: rg.Point :type length: int :type window: rg.RoseWindow """ # ------------------------------------------------------------------------- # DONE: 2. Implement and test this function. # Tests have been written for you (above). 
# # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern, # as in draw_row_of_circles in m1e, # instead of directly using the loop variable. # ########################################################################### # HINT: To figure out the code that computes the necessary # endpoints for each line, # ** FIRST DO A CONCRETE EXAMPLE BY HAND! ** ########################################################################### # ------------------------------------------------------------------------- def run_test_draw_lines(): """ Tests the draw_lines function. """ print() print('--------------------------------------------------') print('Testing the draw_lines function:') print(' See the graphics windows that pop up.') print('--------------------------------------------------') # TWO tests on ONE window. title = 'Tests 1 & 2 of DRAW_LINES: 4 lines, 12 lines!' window1 = rg.RoseWindow(350, 400, title) draw_lines(4, rg.Point(20, 120), window1) draw_lines(12, rg.Point(150, 230), window1) window1.close_on_mouse_click() # A third test on ANOTHER window. window2 = rg.RoseWindow(350, 300, 'Test 3 of DRAW_LINES: 7 lines!') draw_lines(7, rg.Point(50, 120), window2) window2.close_on_mouse_click() def draw_lines(n, point, window): y = point.y x = point.x y = y-100 z = 200/(n-1) for k in range(n): end = rg.Point(x + 100, y) line = rg.Line(point, end) y = y + z line.attach_to(window) window.render() """ What comes in: The three arguments are: -- A integer n that is at least 2. -- An rg.Point. -- An rg.RoseWindow. What goes out: Nothing (i.e., None). Side effects: See draw_lines.pdf in this project for pictures that may help you better understand the following specification: Draws n rg.Lines on the given rg.RoseWindow, such that: -- The leftmost point of each of the rg.Lines is the given rg.Point. -- For the rightmost point of each of the lines: -- Its x-coordinate is (pX + 100), where pX is the x-coordinate of the given rg.Point. -- The y-coordinates of the lines vary evenly from (pY - 100) to (pY + 100), where pY is the y-coordinate of the given rg.Point. Must ** render ** but ** NOT close ** the window. Type hints: :type n: int :type point: rg.Point :type window: rg.RoseWindow """ # ------------------------------------------------------------------------- # DONE: 3. Implement and test this function. # Tests have been written for you (above). # # CONSIDER using the ACCUMULATOR IN GRAPHICS pattern, # as in draw_row_of_circles in m1e, # instead of directly using the loop variable. # ########################################################################### # HINT: To figure out the code that computes the necessary # endpoints for each line, # ** FIRST DO A CONCRETE EXAMPLE BY HAND! ** ########################################################################### # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Calls main to start the ball rolling. # ----------------------------------------------------------------------------- main()
36.590244
79
0.494601
acedc6c3eb5b2db028965f9a4c2555d8acd25382
1,683
py
Python
scripts/generate_silver_train.py
zhangyao1627-zhang/Ask2Transformers
6992144cfdc496bcb4abc91bf9794c8ffe8b7f04
[ "Apache-2.0" ]
null
null
null
scripts/generate_silver_train.py
zhangyao1627-zhang/Ask2Transformers
6992144cfdc496bcb4abc91bf9794c8ffe8b7f04
[ "Apache-2.0" ]
null
null
null
scripts/generate_silver_train.py
zhangyao1627-zhang/Ask2Transformers
6992144cfdc496bcb4abc91bf9794c8ffe8b7f04
[ "Apache-2.0" ]
1
2022-03-20T02:08:10.000Z
2022-03-20T02:08:10.000Z
from argparse import ArgumentParser
import json
import os
import sys

import numpy as np

sys.path.append("./")

from a2t.legacy.relation_classification.tacred import TACRED_LABELS
from a2t.legacy.relation_classification.utils import apply_threshold, f1_score_

parser = ArgumentParser()

parser.add_argument("--preds", type=str, default="experiments/re_train_0.01")
parser.add_argument("--data", type=str, default="data/tacred/train.json")
parser.add_argument("--split", type=str, default="data/tacred/splits/0.25.split.txt")
parser.add_argument("--threshold", type=float, default=0.8908)


def main(args):

    with open(args.data) as f:
        train_data = json.load(f)

    preds = np.load(os.path.join(args.preds, "output.npy"))
    preds = apply_threshold(preds, threshold=args.threshold)
    labels = np.load(os.path.join(args.preds, "labels.npy"))

    label2id = {v: i for i, v in enumerate(TACRED_LABELS)}

    with open(args.split) as f:
        labeled_split = [line.strip() for line in f]

    new_train_instances = []
    labels_, preds_ = [], []
    for pred, inst in zip(preds, train_data):
        if inst["id"] in labeled_split:
            continue
        # FOR TESTING
        labels_.append(label2id[inst["relation"]])
        preds_.append(pred)
        #
        new_inst = inst.copy()
        new_inst["relation"] = TACRED_LABELS[pred]
        new_train_instances.append(new_inst)

    print(args.preds, f1_score_(labels, preds), f1_score_(labels_, preds_))

    with open(os.path.join(args.preds, "train.silver.json"), "wt") as f:
        json.dump(new_train_instances, f, indent=4)


if __name__ == "__main__":
    args = parser.parse_args()
    main(args)
30.6
85
0.683898
acedc71d6cebaaf1e213ff0a228db79ec3eead17
32,228
py
Python
scHiCTools/load/ContactMaps.py
p123hx/scHiC-py
d25524bdcd552d49e82b2d410339c52b0de4a34c
[ "MIT" ]
null
null
null
scHiCTools/load/ContactMaps.py
p123hx/scHiC-py
d25524bdcd552d49e82b2d410339c52b0de4a34c
[ "MIT" ]
null
null
null
scHiCTools/load/ContactMaps.py
p123hx/scHiC-py
d25524bdcd552d49e82b2d410339c52b0de4a34c
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd import sys from copy import deepcopy from scipy.sparse import coo_matrix from .load_hic_file import get_chromosome_lengths, load_HiC from ..embedding import pairwise_distances, MDS, tSNE, PHATE, SpectralEmbedding, PCA from .processing_utils import matrix_operation # from ..analysis import scatter from ..analysis import kmeans, spectral_clustering, HAC import matplotlib.pyplot as plt try: import multiprocessing as mp except: mp=None def add_cell(ch, idx, file, resolution, chromosome_lengths, store_full_map, keep_n_strata, format, customized_format, header, adjust_resolution, map_filter, sparse, gzip, operations, sep=' ',): if ('ch' in ch) and ('chr' not in ch): ch=ch.replace("ch", "chr") mat, strata = load_HiC( file, genome_length=chromosome_lengths, format=format, custom_format=customized_format, header=header, chromosome=ch, resolution=resolution, resolution_adjust=adjust_resolution, map_filter=map_filter, sparse=sparse, gzip=gzip, keep_n_strata=keep_n_strata, operations=operations, sep=sep) contacts=np.sum(mat)/2+ np.trace(mat)/2 short_range=sum([np.sum(mat[i,i:i+int(2000000/resolution)]) for i in range(len(mat))]) mitotic=sum([np.sum(mat[i,i+int(2000000/resolution):i+int(12000000/resolution)]) for i in range(len(mat))]) # if not keep_n_strata: # self.strata[ch][idx] = strata # for strata_idx, stratum in enumerate(strata): # self.strata[ch][strata_idx][idx, :] = stratum if store_full_map: return [contacts, short_range, mitotic, strata, mat] else: return [contacts, short_range, mitotic, strata] # else: # raise ValueError('`keep_n_strata` should be an positive intger.') class scHiCs: def __init__(self, list_of_files, reference_genome, resolution, adjust_resolution=True, sparse=False, chromosomes='all', format='customized', keep_n_strata=10, store_full_map=False, operations=None, header=0, customized_format=None, map_filter=0., gzip=False, sep=' ', parallelize=False, n_processes=None, **kwargs): """ Parameters ---------- list_of_files : list List of HiC file paths. reference_genome : str or dict Now supporting 'mm9', 'mm10', 'hg19', 'hg38', if using other references,you can simply provide the chromosome name and corresponding size (bp) with a dictionary in Python. e.g. {'chr1': 150000000, 'chr2': 130000000, 'chr3': 200000000} resolution : int The resolution to separate genome into bins. If using .hic file format, the given resolution must match with the resolutions in .hic file. adjust_resolution : bool, optional Whether to adjust resolution for input file. Sometimes the input file is already in the proper resolution (e.g. position 3000000 has already been changed to 6 in 500kb resolution), then you can set `adjust_resolution=False`. The default is True. sparse : bool, optional Whether to use sparse matrix to store (only effective when max_distance=None). The default is False. chromosomes : list or str, optional Chromosomes to use, eg. ['chr1', 'chr2'], or just 'except Y', 'except XY','all', which means chr 1-19 + XY for mouse and chr 1-22 + XY for human. The default is 'all'. format : str, optional HiC files' format. e.g., '.hic', 'customized', '.cool'. The default is 'customized'. keep_n_strata : int, optional Only consider contacts within this genomic distance. If `None`, it will store full matrices in numpy matrix or scipy sparse format, which will use too much memory sometimes. The default is 10. store_full_map : bool, optional Whether store contact maps. The default is False. 
operations : list, optional The methods use for pre-processing or smoothing the maps given in a list. The operations will happen in the given order. Operations: 'convolution', 'random_walk', 'network_enhancing'. For pre-processing and smoothing operations, sometimes you need additional arguments. You can check docstrings for pre-processing and smoothing for more information. The default is None. header : int, optional The number of header line(s). If `header=0`, HiC files do not have header. The default is 0. customized_format : int or list, optional Format for each line. The default is None. map_filter : float, optional The threshold to filter some reads by map quality. The default is 0.. gzip : bool, optional If the HiC files are zip files. If `True`, the HiC files are zip files. The default is False. parallelize : bool, optional If `True`, parallelize file reading process. The default is False. n_processes : int, optional Number of cores to use in parallelization. When n_processes=Null, use number of CPUs -1 for parallelization. The default is Null. **kwargs : Other arguments specify smoothing methods passed to function. See `scHiCTools.load.processing_utils.matrix_operation` function. Returns ------- None. """ self.resolution = resolution self.chromosomes, self.chromosome_lengths = get_chromosome_lengths(reference_genome, chromosomes, resolution) self.num_of_cells = len(list_of_files) self.sparse = sparse self.keep_n_strata = keep_n_strata self.contacts=np.array([0]*len(list_of_files)) self.short_range=np.array([0.0]*len(list_of_files)) self.mitotic=np.array([0.0]*len(list_of_files)) self.files=list_of_files self.strata = { ch: [np.zeros((self.num_of_cells, self.chromosome_lengths[ch] - i)) for i in range(keep_n_strata)] for ch in self.chromosomes} if keep_n_strata else None self.full_maps = None self.similarity_method=None self.distance=None assert keep_n_strata is not None or store_full_map is True if not store_full_map: self.full_maps = None elif sparse: self.full_maps = {ch: [None] * self.num_of_cells for ch in self.chromosomes} else: self.full_maps = { ch: np.zeros((self.num_of_cells, self.chromosome_lengths[ch], self.chromosome_lengths[ch])) for ch in self.chromosomes} print('Loading HiC data...') if parallelize: if mp is None: raise ImportError('Need `multiprocessing` installed to parallelize data loading process.') if n_processes is None: n_processes=mp.cpu_count()-1 # print(n_processes) for ch in self.chromosomes: pool = mp.Pool(n_processes) results = [pool.apply(add_cell, args=( ch,idx, file, self.resolution, self.chromosome_lengths, store_full_map, keep_n_strata, format, customized_format, header, adjust_resolution, map_filter, sparse, gzip, operations) ) for idx, file in enumerate(self.files)] if store_full_map: for idx in range(len(self.files)): self.full_maps[ch][idx]=results[idx].pop(4) for idx in range(len(self.files)): strata=results[idx].pop(3) for strata_idx, stratum in enumerate(strata): self.strata[ch][strata_idx][idx, :]=stratum self.contacts+=[int(i[0]) for i in results] self.short_range+=[i[1] for i in results] self.mitotic+=[i[2] for i in results] pool.close() sys.stdout.write('\r') sys.stdout.write("Process chromosome: {} ".format(ch)) sys.stdout.flush() else: for idx, file in enumerate(self.files): # print('Processing {0} out of {1} files: {2}'.format(idx+1,len(list_of_files),file)) for ch in self.chromosomes: if ('ch' in ch) and ('chr' not in ch): ch=ch.replace("ch", "chr") mat, strata = load_HiC( file, genome_length=self.chromosome_lengths, format=format, 
custom_format=customized_format, header=header, chromosome=ch, resolution=resolution, resolution_adjust=adjust_resolution, map_filter=map_filter, sparse=sparse, gzip=gzip, keep_n_strata=keep_n_strata, operations=operations, sep=sep, **kwargs) self.contacts[idx]+=np.sum(mat)/2+ np.trace(mat)/2 # ?? self.short_range[idx]+=sum([np.sum(mat[i,i:i+int(2000000/self.resolution)]) for i in range(len(mat))]) self.mitotic[idx]+=sum([np.sum(mat[i,i+int(2000000/self.resolution):i+int(12000000/self.resolution)]) for i in range(len(mat))]) if store_full_map: self.full_maps[ch][idx] = mat if keep_n_strata: # self.strata[ch][idx] = strata for strata_idx, stratum in enumerate(strata): self.strata[ch][strata_idx][idx, :] = stratum sys.stdout.write('\r') sys.stdout.write("[%-50s] %d/%d \t" % ('='*int((idx+1)/len(self.files)*50), idx+1,len(self.files))) # File %s loaded ,file sys.stdout.flush() def cal_strata(self, n_strata): """ Alter the number of strata kept in a `scHiCs` object. Parameters ---------- n_strata : int Number of strata to keep. Returns ------- dict Strata of cells. """ if self.full_maps is None: if self.keep_n_strata <= n_strata: print(' Only {0} strata are kept!'.format(self.keep_n_strata)) return deepcopy(self.strata) else: return deepcopy({ch: self.strata[ch][:n_strata] for ch in self.chromosomes}) else: if self.keep_n_strata is None: new_strata = { ch: [np.zeros((self.num_of_cells, self.chromosome_lengths[ch] - i)) for i in range(n_strata)] for ch in self.chromosomes} for ch in self.chromosomes: for idx in range(self.num_of_cells): fmap = self.full_maps[ch][idx].toarray() if self.sparse else self.full_maps[ch][idx] for i in range(n_strata): new_strata[ch][i][idx, :] = np.diag(fmap[i:, :-i]) return new_strata elif self.keep_n_strata >= n_strata: return deepcopy({ch: self.strata[ch][:n_strata] for ch in self.chromosomes}) else: for ch in self.chromosomes: self.strata[ch] += [(np.zeros(self.num_of_cells, self.chromosome_lengths[ch] - i)) for i in range(self.keep_n_strata, n_strata)] for idx in range(self.num_of_cells): fmap = self.full_maps[ch][idx].toarray() if self.sparse else self.full_maps[ch][idx] for i in range(self.keep_n_strata, n_strata): self.strata[ch][i][idx, :] = np.diag(fmap[i:, :-i]) return deepcopy(self.strata) def test_print_chr8_inner(self): pair_dis = pairwise_distances(self.strata['chr8'], 'inner_product') print(pair_dis) def test_selfish(self): chrs = ["chr1", "chr2", "chrX", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chrY"] for chr in chrs: pair_dis = pairwise_distances(self.strata[chr], 'selfish') def test_fast(self): chrs = ["chr1", "chr2", "chrX", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chrY"] timeS =0.0; for chr in chrs: pair_dis, time = pairwise_distances(self.strata['chr8'], 'hicrep') timeS += time print("all chr total: ", timeS) def processing(self, operations, **kwargs): """ Apply a smoothing method to contact maps. Requre the `scHiCs` object to store the full map of contacts maps. Parameters ---------- operations : str The methods use for smoothing the maps. Avaliable operations: 'convolution', 'random_walk', 'network_enhancing'. **kwargs : Other arguments specify smoothing methods passed to function. See function `scHiCTools.load.processing_utils.matrix_operation`. Returns ------- None. 
""" if self.full_maps is None: raise ValueError('No full maps stored. Processing is not doable.') if self.sparse: for ch in self.chromosomes: for i, mat in enumerate(self.full_maps[ch]): self.full_maps[ch][i] = coo_matrix(matrix_operation(mat.toarray(), operations, **kwargs)) else: for ch in self.chromosomes: for i, mat in enumerate(self.full_maps[ch]): self.full_maps[ch][i, :, :] = matrix_operation(mat, operations, **kwargs) # Update the strata if self.keep_n_strata is not None: for ch in self.chromosomes: for i, mat in enumerate(self.full_maps[ch]): for j in range(self.keep_n_strata): self.strata[ch][j][i, :] = np.diag(mat[j:, :len(mat) - j]) def plot_contacts(self, hist=True, percent=True, size=1.0, bins=10, color='#1f77b4'): """ Generate two plots: Histogram of contacts and scatter plot of short-range contacts v.s. contacts at the mitotic band. Parameters ---------- hist : bool, optional Whether to plot Histogram of contacts. If `True`, plot Histogram of contacts. The default is True. percent : int, optional Whether to plot scatter plot of short-range contacts v.s. contacts at the mitotic band. If `True`, plot scatter plot of short-range contacts v.s. contacts at the mitotic band. The default is True. size : float, optional The point size of scatter plot. The default is 1.0. bins : int, optional Number of bins in histogram. The default is 10. color : str, optional The color of the plot. The default is '#1f77b4'. Returns ------- None. """ if hist: if percent: plt.subplot(1,2,1) plt.hist(self.contacts,bins=bins,color=color) plt.xlabel("Number of contacts") plt.ylabel('Frequency') plt.title('Histogram of contacts') if percent: if hist: plt.subplot(1,2,2) plt.scatter(self.mitotic*100/self.contacts,self.short_range*100/self.contacts, s=size, c=color) plt.xlabel("% Mitotic contacts") plt.ylabel("% Short-range contacts") plt.title('Short-range contacts v.s. contacts at the mitotic band') def select_cells(self, n_contacts=[0,float("inf")], short_range=[0,1], mitotic=[0,1], selected=None): """ Select qualify cells based on minimum number of contacts and maxium percent of short range contact. Parameters ---------- min_n_contacts : int, optional The threshold of minimum number of contacts in each cell. The default is 0. short_range : list, optional The threshold of minimum and maximum proportion of short range contact in every cell. The default is [0,1]. mitotic : list, optional The threshold of minimum and maximum proportion of mitotic contact in every cell. The default is [0,1]. selected : list, optional A list of cells to be selected. Elements in the list can be either bool or str. The default is None. Returns ------- list Selected files. 
""" # files=np.array(self.files) if selected is None: selected=[True]*self.num_of_cells if np.all([type(i)==str for i in selected]): selected=[True if i in selected else False for i in self.files] elif not np.all([type(i)==bool for i in selected]): raise ValueError("Elements of `selected` should be bool or str!") elif len(selected)!=len(self.files): raise ValueError("When elements of `selected` are bool, length of `selected` should be length of files({})!".format(len(self.files))) selected=np.all([selected, short_range[0] <= self.short_range/self.contacts, self.short_range/self.contacts <= short_range[1], n_contacts[0] <= self.contacts, self.contacts <= n_contacts[1], mitotic[0] <= self.mitotic/self.contacts, self.mitotic/self.contacts <= mitotic[1] ],axis=0) self.num_of_cells=sum(selected) self.files=[self.files[i] for i in range(len(self.files)) if selected[i]] self.contacts=self.contacts[selected] self.short_range=self.short_range[selected] self.mitotic=self.mitotic[selected] if self.strata is not None: for ch in self.chromosomes: for s in range(self.keep_n_strata): self.strata[ch][s]=self.strata[ch][s][selected,:] if self.full_maps is not None: for ch in self.chromosomes: self.full_maps[ch]=self.full_maps[ch][selected] if self.distance is not None: self.distance=self.distance[:,selected,:][:,:,selected] return self.files def scHiCluster(self,dim=2,n_clusters=4,cutoff=0.8,n_PCs=10,**kwargs): """ Embedding and clustering single cells using HiCluster. Reference: Zhou J, Ma J, Chen Y, Cheng C, Bao B, Peng J, et al. Robust single-cell Hi-C clustering by convolution- and random-walk–based imputation. PNAS. 2019 Jul 9;116(28):14011–8. Parameters ---------- dim : int, optional Number of dimension of embedding. The default is 2. n_clusters : int, optional Number of clusters. The default is 4. cutoff : float, optional The cutoff proportion to convert the real contact matrix into binary matrix. The default is 0.8. n_PCs : int, optional Number of principal components. The default is 10. **kwargs : Other arguments passed to kmeans. See `scHiCTools.analysis.clustering.kmeans` function. Returns ------- embeddings : numpy.ndarray The embedding of cells using HiCluster. label : numpy.ndarray An array of cell labels clustered by HiCluster. """ if self.full_maps is None: raise ValueError('No full maps stored. scHiCluster is not doable.') X=None for ch in self.chromosomes: sys.stdout.write('\r') sys.stdout.write('HiCluster processing chromosome {}. '.format(ch)) # print('HiCluster processing chromosomes {}'.format(ch)) A=self.full_maps[ch].copy() if len(A.shape)==3: n=A.shape[1]*A.shape[2] A.shape=(A.shape[0],n) A=np.quantile(A,cutoff,axis=1)<np.transpose(A) A = PCA(A.T,n_PCs) if X is None: X=A else: X=np.append(X, A, axis=1) X=PCA(X,n_PCs) label=kmeans(X,n_clusters,kwargs.pop('weights',None),kwargs.pop('iteration',1000)) return X[:,:dim], label def learn_embedding(self, similarity_method, embedding_method, dim=2, aggregation='median', n_strata=None, return_distance=False, print_time=False, parallelize=False, n_processes=1, **kwargs): """ Function to find a low-dimensional embedding for cells. Parameters ---------- similarity_method : str The method used to calculate similarity matrix. Now support 'inner_product', 'HiCRep' and 'Selfish'. embedding_method : str The method used to project cells into lower-dimensional space. Now support 'MDS', 'tSNE', 'phate', 'spectral_embedding'. dim : int, optional Dimension of the embedding space. The default is 2. 
aggregation : str, optional Method to find the distance matrix based on distance matrices of chromesomes. Must be 'mean' or 'median'. The default is 'median'. n_strata : int, optional Number of strata used in calculation. The default is None. return_distance : bool, optional Whether to return the distance matrix of cells. If True, return (embeddings, distance_matrix); if False, only return embeddings. The default is False. print_time : bool, optional Whether to print process time. The default is False. **kwargs : Including two arguments for Selfish (see funciton `pairwise_distances`):\ `n_windows`: number of Selfish windows\ `sigma`: sigma in the Gaussian-like kernel\ and some arguments specify different embedding method (see functions in `scHiCTools.embedding.embedding`). Returns ------- embeddings: numpy.ndarray The embedding of cells in lower-dimensional space. final_distance: numpy.ndarray, optional The pairwise distance calculated. """ if self.distance is None or self.similarity_method!=similarity_method: self.similarity_method=similarity_method distance_matrices = [] assert embedding_method.lower() in ['mds', 'tsne', 'umap', 'phate', 'spectral_embedding'] assert n_strata is not None or self.keep_n_strata is not None n_strata = n_strata if n_strata is not None else self.keep_n_strata new_strata = self.cal_strata(n_strata) if print_time: time1=0 time2=0 for ch in self.chromosomes: print(ch) distance_mat,t1,t2 = pairwise_distances(new_strata[ch], similarity_method, print_time, kwargs.get('sigma',.5), kwargs.get('window_size',10),parallelize, n_processes) time1=time1+t1 time2=time2+t2 distance_matrices.append(distance_mat) print('Sum of time 1:', time1) print('Sum of time 2:', time2) else: for i,ch in enumerate(self.chromosomes): # print(ch) distance_mat = pairwise_distances(new_strata[ch], similarity_method, print_time, kwargs.get('sigma',.5), kwargs.get('window_size',10)) distance_matrices.append(distance_mat) sys.stdout.write('\r') sys.stdout.write("[%-30s] %d/%d \t Calculating chromosome %s. " % ('='*int((i+1)/len(self.chromosomes)*30),i+1,len(self.chromosomes),ch)) self.distance = np.array(distance_matrices) if aggregation == 'mean': final_distance = np.mean(self.distance, axis=0) elif aggregation == 'median': final_distance = np.median(self.distance, axis=0) else: raise ValueError('Aggregation method {0} not supported. Only "mean" or "median".'.format(aggregation)) np.fill_diagonal(final_distance, 0) embedding_method = embedding_method.lower() if embedding_method == 'mds': embeddings = MDS(final_distance, dim) elif embedding_method == 'tsne': embeddings = tSNE(final_distance, dim, kwargs.pop('perp',30), kwargs.pop('iteration',1000), kwargs.pop('momentum', 0.5), kwargs.pop('rate', 200), kwargs.pop('tol',1e-5)) # elif embedding_method == 'umap': # embeddings = UMAP(final_distance, dim, # kwargs.pop('n',5), # kwargs.pop('min_dist',1), # kwargs.pop('n_epochs',10), # kwargs.pop('alpha',1), # kwargs.pop('n_neg_samples',0)) elif embedding_method == 'phate': embeddings = PHATE(final_distance, dim, kwargs.pop('k',5), kwargs.pop('a',1), kwargs.pop('gamma',1), kwargs.pop('t_max',100), kwargs.pop('momentum',.1), kwargs.pop('iteration',1000)) elif embedding_method == 'spectral_embedding': graph=np.exp(-np.square(final_distance)/np.mean(final_distance**2)) graph = graph-np.diag(graph.diagonal()) embeddings = SpectralEmbedding(graph, dim) else: raise ValueError('Embedding method {0} not supported. 
'.format(embedding_method)) if return_distance: return embeddings, final_distance else: return embeddings def clustering(self, n_clusters, clustering_method, similarity_method, aggregation='median', n_strata=None, print_time=False, **kwargs): """ Parameters ---------- n_clusters : int Number of clusters. clustering_method : str Clustering method in 'kmeans', 'spectral_clustering' or 'HAC'(hierarchical agglomerative clustering). similarity_method : str Reproducibility measure. Value in ‘InnerProduct’, ‘HiCRep’ or ‘Selfish’. aggregation : str, optional Method to aggregate different chromosomes. Value is either 'mean' or 'median'. The default is 'median'. n_strata : int or None, optional Only consider contacts within this genomic distance. If it is None, it will use the all strata kept from previous loading process. The default is None. print_time : bool, optional Whether to print the processing time. The default is False. **kwargs : Other arguments pass to function `scHiCTools.embedding.reproducibility.pairwise_distances `, and the clustering function in `scHiCTools.analysis.clustering`. Returns ------- label : numpy.ndarray An array of cell labels clustered. """ if self.distance is None or self.similarity_method!=similarity_method: self.similarity_method=similarity_method distance_matrices = [] assert n_strata is not None or self.keep_n_strata is not None n_strata = n_strata if n_strata is not None else self.keep_n_strata new_strata = self.cal_strata(n_strata) for i,ch in enumerate(self.chromosomes): # print(ch) distance_mat = pairwise_distances(new_strata[ch], similarity_method, print_time, kwargs.get('sigma',.5), kwargs.get('window_size',10)) distance_matrices.append(distance_mat) sys.stdout.write('\r') sys.stdout.write("[%-30s] %d/%d \t Calculating chromosome %s. " % ('='*int((i+1)/len(self.chromosomes)*30), i+1, len(self.chromosomes), ch)) self.distance = np.array(distance_matrices) if aggregation == 'mean': final_distance = np.mean(self.distance, axis=0) elif aggregation == 'median': final_distance = np.median(self.distance, axis=0) else: raise ValueError('Aggregation method {0} not supported. Only "mean" or "median".'.format(aggregation)) np.fill_diagonal(final_distance, 0) clustering_method=clustering_method.lower() if clustering_method=='kmeans': embeddings = MDS(final_distance, n_clusters) label=kmeans(embeddings, k=n_clusters, **kwargs) elif clustering_method=='spectral_clustering': label=spectral_clustering(final_distance, data_type='distance_matrix', n_clusters=n_clusters, **kwargs) elif clustering_method=='hac': label=HAC(final_distance, 'distance_matrix', n_clusters, kwargs.pop('method','centroid')) else: raise ValueError('Embedding method {0} not supported. '.format(clustering_method)) return label
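

# --- Illustrative usage sketch (comments only, not part of the library code above) ---
# Assuming the enclosing loader class is scHiCTools' `scHiCs`; the constructor
# arguments, file names, genome and resolution below are placeholders chosen
# only to show how learn_embedding() and clustering() are typically called.
#
#   cells = scHiCs(['cell_1.txt', 'cell_2.txt', 'cell_3.txt'],
#                  reference_genome='mm9', resolution=500000,
#                  keep_n_strata=10)
#   emb = cells.learn_embedding('inner_product', 'MDS', dim=2)
#   labels = cells.clustering(n_clusters=4,
#                             clustering_method='kmeans',
#                             similarity_method='inner_product')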
40.436637
185
0.5386
acedc7cfa32c62628cc483ed81dc3e424da06faf
4,063
py
Python
self_paced_ensemble/utils/_validation_data.py
ZhiningLiu1998/self-paced-ensemble
461d00fbedd204a28922a525ec4eeb0cde7ee906
[ "MIT" ]
203
2019-06-04T07:43:25.000Z
2022-03-30T22:16:32.000Z
self_paced_ensemble/utils/_validation_data.py
ZhiningLiu1998/self-paced-ensemble
461d00fbedd204a28922a525ec4eeb0cde7ee906
[ "MIT" ]
14
2020-02-26T09:42:46.000Z
2022-01-11T12:25:16.000Z
self_paced_ensemble/utils/_validation_data.py
ZhiningLiu1998/self-paced-ensemble
461d00fbedd204a28922a525ec4eeb0cde7ee906
[ "MIT" ]
46
2019-11-25T01:13:31.000Z
2021-12-29T06:49:07.000Z
"""Utilities for data validation.""" # Authors: Zhining Liu <zhining.liu@outlook.com> # License: MIT from collections import OrderedDict from sklearn.utils import check_X_y VALID_DATA_INFO = "'eval_datasets' should be a `dict` of validation data," + \ " e.g., {..., dataset_name : (X_valid, y_valid), ...}." TRAIN_DATA_NAME = "train" def _check_eval_datasets_name(data_name): if not isinstance(data_name, str): raise TypeError( VALID_DATA_INFO + \ f" The keys must be `string`, got {type(data_name)}, " + \ f" please check your usage." ) if data_name == TRAIN_DATA_NAME: raise ValueError( f"The name {TRAIN_DATA_NAME} is reserved for the training" f" data (it will automatically add into the 'eval_datasets_'" f" attribute after calling `fit`), please use another name" f" for your evaluation dataset." ) return data_name def _check_eval_datasets_tuple(data_tuple, data_name, **check_x_y_kwargs): if not isinstance(data_tuple, tuple): raise TypeError( VALID_DATA_INFO + \ f" The value of '{data_name}' is {type(data_tuple)} (should be tuple)," + \ f" please check your usage." ) elif len(data_tuple) != 2: raise ValueError( VALID_DATA_INFO + \ f" The data tuple of '{data_name}' has {len(data_tuple)} element(s)" + \ f" (should be 2), please check your usage." ) else: X, y = check_X_y(data_tuple[0], data_tuple[1], **check_x_y_kwargs) return (X, y) def _check_eval_datasets_dict(eval_datasets_dict, **check_x_y_kwargs): if TRAIN_DATA_NAME in eval_datasets_dict.keys(): raise ValueError( f"The name '{TRAIN_DATA_NAME}' could not be used" f" for the validation datasets. Please use another name." ) eval_datasets_dict_ = {} for data_name, data_tuple in eval_datasets_dict.items(): data_name_ = _check_eval_datasets_name(data_name) data_tuple_ = _check_eval_datasets_tuple(data_tuple, data_name_, **check_x_y_kwargs) eval_datasets_dict_[data_name_] = data_tuple_ return eval_datasets_dict_ def _all_elements_equal(list_to_check:list) -> bool: if len(list_to_check) == 1: return True return all([ (list_to_check[i] == list_to_check[i+1]) for i in range(len(list_to_check)-1) ]) def check_eval_datasets(eval_datasets, X_train_=None, y_train_=None, **check_x_y_kwargs): """Check `eval_datasets` parameter.""" # Whether to add training data in to returned data dictionary if X_train_ is None and y_train_ is None: result_datasets = OrderedDict({}) else: result_datasets = OrderedDict({TRAIN_DATA_NAME: (X_train_, y_train_)}) # If eval_datasets is None # return data dictionary if eval_datasets == None: return result_datasets # If eval_datasets is dict elif isinstance(eval_datasets, dict): # Check dict and validate all names (keys) and data tuples (values) eval_datasets_ = _check_eval_datasets_dict(eval_datasets, **check_x_y_kwargs) # Combine train_datasets and eval_datasets_ result_datasets.update(eval_datasets_) # Ensure all datasets have the same number of features if not _all_elements_equal([data_tuple[0].shape[1] for data_tuple in result_datasets.values()]): raise ValueError( f"The train + evaluation datasets have inconsistent number of" f" features. Make sure that the data given in 'eval_datasets'" f" and the training data ('X', 'y') are sampled from the same" f" task/distribution." ) return result_datasets # Else raise TypeError else: raise TypeError( VALID_DATA_INFO + \ f" Got {type(eval_datasets)}, please check your usage." )
35.025862
92
0.635491
acedc7e126151ab3ca2a0158293c4be258b76cc6
2,245
py
Python
tests/rules/test_apt_get.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
75,504
2015-04-08T18:22:19.000Z
2022-03-31T23:59:52.000Z
tests/rules/test_apt_get.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
1,160
2015-04-17T18:47:12.000Z
2022-03-30T20:42:26.000Z
tests/rules/test_apt_get.py
HiteshMah-Jan/thefuck
132c62262246824470934c2c6f46919ef6f00203
[ "MIT" ]
4,399
2015-04-17T18:36:04.000Z
2022-03-31T07:01:03.000Z
import pytest from thefuck.rules.apt_get import match, get_new_command from thefuck.types import Command @pytest.mark.parametrize('command, packages', [ (Command('vim', 'vim: command not found'), [('vim', 'main'), ('vim-tiny', 'main')]), (Command('sudo vim', 'vim: command not found'), [('vim', 'main'), ('vim-tiny', 'main')]), (Command('vim', "The program 'vim' is currently not installed. You can install it by typing: sudo apt install vim"), [('vim', 'main'), ('vim-tiny', 'main')])]) def test_match(mocker, command, packages): mocker.patch('thefuck.rules.apt_get.which', return_value=None) mocker.patch('thefuck.rules.apt_get._get_packages', create=True, return_value=packages) assert match(command) @pytest.mark.parametrize('command, packages, which', [ (Command('a_bad_cmd', 'a_bad_cmd: command not found'), [], None), (Command('vim', ''), [], None), (Command('', ''), [], None), (Command('vim', 'vim: command not found'), ['vim'], '/usr/bin/vim'), (Command('sudo vim', 'vim: command not found'), ['vim'], '/usr/bin/vim')]) def test_not_match(mocker, command, packages, which): mocker.patch('thefuck.rules.apt_get.which', return_value=which) mocker.patch('thefuck.rules.apt_get._get_packages', create=True, return_value=packages) assert not match(command) @pytest.mark.parametrize('command, new_command, packages', [ (Command('vim', ''), 'sudo apt-get install vim && vim', [('vim', 'main'), ('vim-tiny', 'main')]), (Command('convert', ''), 'sudo apt-get install imagemagick && convert', [('imagemagick', 'main'), ('graphicsmagick-imagemagick-compat', 'universe')]), (Command('sudo vim', ''), 'sudo apt-get install vim && sudo vim', [('vim', 'main'), ('vim-tiny', 'main')]), (Command('sudo convert', ''), 'sudo apt-get install imagemagick && sudo convert', [('imagemagick', 'main'), ('graphicsmagick-imagemagick-compat', 'universe')])]) def test_get_new_command(mocker, command, new_command, packages): mocker.patch('thefuck.rules.apt_get._get_packages', create=True, return_value=packages) assert get_new_command(command) == new_command
41.574074
120
0.633853
acedc853c9570363c9dbcf91e41ae781987e2013
10,041
py
Python
sdk/servicebus/azure-servicebus/azure/servicebus/management/_generated/operations/_service_bus_management_client_operations.py
praveenkuttappan/azure-sdk-for-python
4b79413667b7539750a6c7dde15737013a3d4bd5
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/servicebus/azure-servicebus/azure/servicebus/management/_generated/operations/_service_bus_management_client_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/servicebus/azure-servicebus/azure/servicebus/management/_generated/operations/_service_bus_management_client_operations.py
v-xuto/azure-sdk-for-python
9c6296d22094c5ede410bc83749e8df8694ccacc
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from .. import models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Optional, TypeVar T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class ServiceBusManagementClientOperationsMixin(object): def list_subscriptions( self, topic_name, # type: str skip=0, # type: Optional[int] top=100, # type: Optional[int] api_version="2021_05", # type: Optional[str] **kwargs # type: Any ): # type: (...) -> object """Get the details about the subscriptions of the given topic. Get subscriptions. :param topic_name: name of the topic. :type topic_name: str :param skip: :type skip: int :param top: :type top: int :param api_version: Api Version. :type api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: object, or the result of cls(response) :rtype: object :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[object] error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.list_subscriptions.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if skip is not None: query_parameters['$skip'] = self._serialize.query("skip", skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query("top", top, 'int') if api_version is not None: query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/xml' # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.ServiceBusManagementError, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_subscriptions.metadata = {'url': '/{topicName}/subscriptions'} # type: ignore def list_rules( self, topic_name, # type: str subscription_name, # type: str skip=0, # type: Optional[int] top=100, # type: 
Optional[int] api_version="2021_05", # type: Optional[str] **kwargs # type: Any ): # type: (...) -> object """Get the details about the rules of the given topic subscription. Get rules of a topic subscription. :param topic_name: name of the topic. :type topic_name: str :param subscription_name: name of the subscription. :type subscription_name: str :param skip: :type skip: int :param top: :type top: int :param api_version: Api Version. :type api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: object, or the result of cls(response) :rtype: object :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[object] error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.list_rules.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'topicName': self._serialize.url("topic_name", topic_name, 'str', min_length=1), 'subscriptionName': self._serialize.url("subscription_name", subscription_name, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if skip is not None: query_parameters['$skip'] = self._serialize.query("skip", skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query("top", top, 'int') if api_version is not None: query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/xml' # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.ServiceBusManagementError, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_rules.metadata = {'url': '/{topicName}/subscriptions/{subscriptionName}/rules'} # type: ignore def list_entities( self, entity_type, # type: str skip=0, # type: Optional[int] top=100, # type: Optional[int] api_version="2021_05", # type: Optional[str] **kwargs # type: Any ): # type: (...) -> object """Get the details about the entities of the given Service Bus namespace. Get Queues or topics. :param entity_type: List all queues or all topics of the service bus. Value can be "queues" or "topics". :type entity_type: str :param skip: :type skip: int :param top: :type top: int :param api_version: Api Version. 
:type api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: object, or the result of cls(response) :rtype: object :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[object] error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.list_entities.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'entityType': self._serialize.url("entity_type", entity_type, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] if skip is not None: query_parameters['$skip'] = self._serialize.query("skip", skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query("top", top, 'int') if api_version is not None: query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/xml' # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.ServiceBusManagementError, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_entities.metadata = {'url': '/$Resources/{entityType}'} # type: ignore
42.367089
116
0.635395
acedc8df77b5f5792e51763fabe4fa2cfe585958
215
py
Python
budgetcenter/budgetcenter/doctype/self_supported_maintenance_item_table/self_supported_maintenance_item_table.py
yuvabedev/budgetcenter
d3307500fa9fa71c0d292951282f1f6f135ff510
[ "MIT" ]
null
null
null
budgetcenter/budgetcenter/doctype/self_supported_maintenance_item_table/self_supported_maintenance_item_table.py
yuvabedev/budgetcenter
d3307500fa9fa71c0d292951282f1f6f135ff510
[ "MIT" ]
3
2022-01-04T06:30:57.000Z
2022-01-19T06:42:40.000Z
budgetcenter/budgetcenter/doctype/self_supported_maintenance_item_table/self_supported_maintenance_item_table.py
yuvabedev/budgetcenter
d3307500fa9fa71c0d292951282f1f6f135ff510
[ "MIT" ]
null
null
null
# Copyright (c) 2021, Yuvavbe and contributors # For license information, please see license.txt # import frappe from frappe.model.document import Document class SelfSupportedMaintenanceItemTable(Document): pass
23.888889
50
0.813953
acedc981a57cd1349ffb43bd1d1ebb49faacad77
1,072
py
Python
eod/tasks/cls/models/heads/cls_head.py
scott-mao/EOD
f10e64de86c0f356ebf5c7e923f4042eec4207b1
[ "Apache-2.0" ]
1
2022-01-12T01:51:39.000Z
2022-01-12T01:51:39.000Z
eod/tasks/cls/models/heads/cls_head.py
YZW-explorer/EOD
f10e64de86c0f356ebf5c7e923f4042eec4207b1
[ "Apache-2.0" ]
null
null
null
eod/tasks/cls/models/heads/cls_head.py
YZW-explorer/EOD
f10e64de86c0f356ebf5c7e923f4042eec4207b1
[ "Apache-2.0" ]
null
null
null
import torch.nn as nn from eod.utils.general.registry_factory import MODULE_ZOO_REGISTRY from eod.utils.model.initializer import init_weights_msra __all__ = ['BaseClsHead'] @MODULE_ZOO_REGISTRY.register('base_cls_head') class BaseClsHead(nn.Module): def __init__(self, num_classes, in_plane, input_feature_idx=-1): super(BaseClsHead, self).__init__() self.pool = nn.AdaptiveAvgPool2d((1, 1)) self.num_classes = num_classes self.in_plane = in_plane self.input_feature_idx = input_feature_idx self.classifier = nn.Linear(self.in_plane, self.num_classes) self.prefix = self.__class__.__name__ self._init_weights() def _init_weights(self): init_weights_msra(self.classifier) def forward_net(self, x): if isinstance(x, dict): x = x['features'][self.input_feature_idx] x = self.pool(x) x = x.view(x.size(0), -1) logits = self.classifier(x) return {'logits': logits} def forward(self, input): return self.forward_net(input)
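

# A minimal forward-pass sketch (illustrative only; the tensor sizes and class
# count below are arbitrary and not taken from any EOD config).
if __name__ == '__main__':
    import torch

    head = BaseClsHead(num_classes=10, in_plane=64)
    feats = torch.randn(2, 64, 7, 7)        # [batch, channels, H, W]
    out = head({'features': [feats]})       # dict input; the last feature map is used
    print(out['logits'].shape)              # expected: torch.Size([2, 10])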
31.529412
68
0.676306
acedc99bac6da36a7d58c0f7e10f9787fd6961af
1,338
py
Python
figures/linelist_comparison/comparison.py
DanielAndreasen/Thesis
da18d41e48de5d34c8281ffd9e850dfd4fe37824
[ "MIT" ]
2
2018-04-25T08:31:52.000Z
2018-05-09T13:46:52.000Z
figures/linelist_comparison/comparison.py
DanielAndreasen/Thesis
da18d41e48de5d34c8281ffd9e850dfd4fe37824
[ "MIT" ]
null
null
null
figures/linelist_comparison/comparison.py
DanielAndreasen/Thesis
da18d41e48de5d34c8281ffd9e850dfd4fe37824
[ "MIT" ]
null
null
null
from __future__ import division import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.rcParams['xtick.direction'] = 'in' plt.rcParams['ytick.direction'] = 'in' plt.rcParams['axes.spines.right'] = False plt.rcParams['axes.spines.top'] = False plt.rcParams['axes.linewidth'] = 2 plt.rcParams['xtick.major.width'] = 2 plt.rcParams['ytick.major.width'] = 2 def get_combined_data(): names = ('wavelength', 'element', 'EP', 'loggf', 'EW') df1 = pd.read_table('linelist1.moog', delimiter=r'\s+', names=names, skiprows=1) df2 = pd.read_table('linelist2.moog', delimiter=r'\s+', names=names, skiprows=1) df1['wavelength'] = [round(w, 2) for w in df1['wavelength']] df2['wavelength'] = [round(w, 2) for w in df2['wavelength']] df = pd.merge(df1, df2, how='outer', left_on='wavelength', right_on='wavelength', suffixes=('_1', '_2')) df['diff'] = df['EW_1'] - df['EW_2'] return df if __name__ == '__main__': df = get_combined_data() m, s = np.nanmedian(df['diff']), np.nanstd(df['diff']) plt.figure() plt.plot(df['EW_1'], df['diff'], '.') plt.hlines([m, m+s, m-s], 4, 200) plt.xlabel(r'EW$_1$ [m$\AA$]') plt.ylabel(r'EW$_1$ - EW$_2$ [m$\AA$]') # plt.savefig('../linelist_comparison.pdf') plt.show()
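

# Expected layout of `linelist1.moog` / `linelist2.moog` (whitespace-separated,
# first row skipped as a header, matching the `names` tuple in
# get_combined_data; the numbers below are placeholders, not real data):
#
#   wavelength  element  EP    loggf   EW
#   6056.01     26.0     4.73  -0.46   35.4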
31.857143
84
0.607623
acedcb492f536f224942e50fa793ab6ccaa0067c
3,828
py
Python
strategy/gsmdk/SkyPark/python/SkyPark.py
QUANTAXIS/quant
d3b76799b20cac7a876fbdeb6f97c48295dde06f
[ "MIT" ]
null
null
null
strategy/gsmdk/SkyPark/python/SkyPark.py
QUANTAXIS/quant
d3b76799b20cac7a876fbdeb6f97c48295dde06f
[ "MIT" ]
null
null
null
strategy/gsmdk/SkyPark/python/SkyPark.py
QUANTAXIS/quant
d3b76799b20cac7a876fbdeb6f97c48295dde06f
[ "MIT" ]
1
2021-04-10T07:10:42.000Z
2021-04-10T07:10:42.000Z
# encoding: utf-8
from gmsdk.api import StrategyBase
from gmsdk import md
from gmsdk.enums import *
import arrow
import time

# volume to open per trade
OPEN_VOL = 5

class SkyPark(StrategyBase) :
    def __init__(self, *args, **kwargs):
        super(SkyPark, self).__init__(*args, **kwargs)
        # upper / lower band (high / low of the first K line)
        self.upr = None
        self.dwn = None
        # position-opening flags
        self.open_long_flag = False
        self.open_short_flag = False
        # current holding volume
        self.hoding = 0
        self.__get_param()
        self.__init_data()

    def __get_param( self ):
        '''
        Read the strategy parameters from the config file.
        '''
        # traded security code
        self.trade_symbol = self.config.get('para', 'trade_symbol')
        pos = self.trade_symbol.find('.')
        self.exchange = self.trade_symbol[:pos]
        self.sec_id = self.trade_symbol[pos+1:]

        FMT = '%s %s'
        today = arrow.now().date()

        # time of the first K line
        first_kline_time = self.config.get('para', 'first_kline_time')
        et = FMT % (today.isoformat(), first_kline_time)
        self.first_kline_time_str = et

        # time to close out all positions
        end_time = self.config.get('para', 'end_time')
        et = FMT % (today.isoformat(), end_time)
        self.end_trading = arrow.get(et).replace(tzinfo='local').timestamp
        print "end time %s" % (et)

        # gap-up threshold for going long
        self.open_long_size = self.config.getfloat('para', 'open_long_size')
        # gap-down threshold for going short
        self.open_short_size = self.config.getfloat('para', 'open_short_size')

    def __init_data( self ):
        dailybars = self.get_last_dailybars( self.trade_symbol )
        if len(dailybars) > 0 :
            self.pre_close = dailybars[0].close

        # data of the first K line
        while self.upr is None or self.dwn is None:
            print 'waiting to get the first K line...'
            bars = self.get_bars( self.trade_symbol, 60, self.first_kline_time_str, self.first_kline_time_str )
            if len(bars) > 0 :
                self.upr = bars[0].high   # upper band
                self.dwn = bars[0].low    # lower band
                print 'upr: %s, dwn: %s' % (self.upr, self.dwn)
                if bars[0].open > self.pre_close * (1 + self.open_long_size):
                    self.open_long_flag = True    # gap up beyond threshold: only long entries today
                elif bars[0].open < self.pre_close * (1 - self.open_short_size):
                    self.open_short_flag = True   # gap down beyond threshold: only short entries today
                else:
                    print 'Trading conditions not met; no trading today.'
                    break
            time.sleep( 1 )

    def on_tick(self, tick):
        # latest traded price
        self.close = tick.last_price

    def on_bar(self, bar):
        '''
        Bar (K-line) data event.
        '''
        if self.open_long_flag and self.close > self.upr and 0 == self.hoding :
            self.open_long(self.exchange, self.sec_id, 0, OPEN_VOL )
            self.hoding += OPEN_VOL
            print 'open long: last price %s, vol %s' % (self.close, OPEN_VOL)

        elif self.open_short_flag and self.close < self.dwn and 0 == self.hoding :
            self.open_short( self.exchange, self.sec_id, 0, OPEN_VOL )
            self.hoding += OPEN_VOL
            print 'open short: last price %s, vol %s' % (self.close, OPEN_VOL)

        # close all positions before the end of the trading day
        if bar.utc_time > self.end_trading :
            if self.open_long_flag and self.hoding > 0 :
                self.close_long( self.exchange, self.sec_id, 0, self.hoding )
                print 'end trading time close long, vol: %s' % self.hoding
            elif self.open_short_flag and self.hoding > 0 :
                self.close_short( self.exchange, self.sec_id, 0, self.hoding )
                print 'end trading time close short, vol: %s' % self.hoding


if __name__ == '__main__':
    sky_park = SkyPark(config_file='SkyPark.ini')
    ret = sky_park.run()
    print sky_park.get_strerror(ret)

33.578947
111
0.57001
acedcb68082587fca81c04b174e60e636cb380d0
683
py
Python
src/cirrus_ngs/server/Pipelines/util/RestoreBackups.py
ucsd-ccbb/cirrus-ngs
8f51450b3d971b03d4fd08a1aab11d5a076aa23e
[ "MIT" ]
8
2017-01-20T00:00:45.000Z
2022-02-11T00:20:45.000Z
src/cirrus_ngs/server/Pipelines/util/RestoreBackups.py
ucsd-ccbb/cirrus-ngs
8f51450b3d971b03d4fd08a1aab11d5a076aa23e
[ "MIT" ]
3
2018-03-23T19:09:06.000Z
2018-03-26T19:49:55.000Z
src/cirrus_ngs/server/Pipelines/util/RestoreBackups.py
ucsd-ccbb/cirrus-ngs
8f51450b3d971b03d4fd08a1aab11d5a076aa23e
[ "MIT" ]
2
2018-03-29T06:24:31.000Z
2019-04-01T18:34:53.000Z
import os config_dir = "/shared/workspace/cirrus-ngs/src/cirrus_ngs/server/Pipelines/config" scripts_dir = "/shared/workspace/cirrus-ngs/src/cirrus_ngs/server/Pipelines/scripts" for path,dirs,files in os.walk(config_dir): for curr_file in files: if curr_file.endswith("BACKUP"): os.rename("{}/{}".format(path, curr_file), "{}/{}".format(path, os.path.splitext(curr_file)[0])) for path,dirs,files in os.walk(scripts_dir): dirs[:] = [d for d in dirs if not d == "deprecated"] for curr_file in files: if curr_file.endswith("BACKUP"): os.rename("{}/{}".format(path, curr_file), "{}/{}".format(path, os.path.splitext(curr_file)[0]))
42.6875
108
0.667643
acedcb6d51ea1897bcdcf6ce085b295852593c69
2,354
py
Python
tasks/search.py
NT795/wbSpider
5323e8cda958a390ee611d103aba38a6863bc520
[ "MIT" ]
null
null
null
tasks/search.py
NT795/wbSpider
5323e8cda958a390ee611d103aba38a6863bc520
[ "MIT" ]
null
null
null
tasks/search.py
NT795/wbSpider
5323e8cda958a390ee611d103aba38a6863bc520
[ "MIT" ]
null
null
null
# coding:utf-8 from urllib import parse as url_parse from logger.log import crawler from tasks.workers import app from page_get.basic import get_page from config.conf import get_max_search_page from page_parse import search as parse_search from db.search_words import get_search_keywords from db.keywords_wbdata import insert_keyword_wbid from db.wb_data import insert_weibo_data, get_wb_by_mid # This url is just for original weibos. # If you want other kind of search, you can change the url below url = 'http://s.weibo.com/weibo/{}&scope=ori&suball=1&page={}' limit = get_max_search_page() + 1 @app.task(ignore_result=True) def search_keyword(keyword, keyword_id): cur_page = 1 encode_keyword = url_parse.quote(keyword) #crawler.info(limit) while cur_page < limit: cur_url = url.format(encode_keyword, cur_page) search_page = get_page(cur_url) #crawler.info(search_page) if not search_page: crawler.warning('No result for keyword {}, the source page is {}'.format(keyword, search_page)) return search_list = parse_search.get_search_info(search_page) # Because the search results are sorted by time, if any result has been stored in mysql, # we need not crawl the same keyword in this turn for wb_data in search_list: rs = get_wb_by_mid(wb_data.weibo_id) if rs: crawler.info('keyword {} has been crawled in last turn'.format(keyword)) #continue return else: insert_weibo_data(wb_data) insert_keyword_wbid(keyword_id, wb_data.weibo_id) # send task for crawling user info app.send_task('tasks.user.crawl_person_infos', args=(wb_data.uid,), queue='user_crawler', routing_key='for_user_info') if 'page next S_txt1 S_line1' in search_page: cur_page += 1 else: crawler.info('keyword {} has been crawled in this turn'.format(keyword)) return @app.task(ignore_result=True) def excute_search_task(): keywords = get_search_keywords() for each in keywords: app.send_task('tasks.search.search_keyword', args=(each[0], each[1]), queue='search_crawler', routing_key='for_search_info')
37.967742
107
0.66695
acedcc1006a952d355a3fc79fde230da87b4320a
756
py
Python
packTextures.py
superspeeder/elgameig
80bec1321d84eb5e7ab50cef1d638ad438b901d5
[ "MIT" ]
2
2022-02-21T19:25:54.000Z
2022-02-26T02:17:04.000Z
packTextures.py
superspeeder/elgameig
80bec1321d84eb5e7ab50cef1d638ad438b901d5
[ "MIT" ]
1
2022-03-04T00:38:48.000Z
2022-03-04T00:38:48.000Z
packTextures.py
superspeeder/elgameig
80bec1321d84eb5e7ab50cef1d638ad438b901d5
[ "MIT" ]
null
null
null
import json import os import sys def runTexturePacker(classpath, inpath, outpath, name): sys.stdout.write(f"Packing textures at {os.path.abspath(os.path.join('textures', inpath))} into {os.path.abspath(os.path.join('core/assets/textures/', outpath, name + '.atlas'))}\r\n") sys.stdout.flush() if os.system(f"java -cp {classpath} com.badlogic.gdx.tools.texturepacker.TexturePacker {os.path.join('textures', inpath)} {os.path.join('core/assets/textures/', outpath)} {name}"): raise RuntimeError("Failed to pack textures") if __name__ == "__main__": print("Packing Textures") with open("texturePacking.json") as f: tpacking_data = json.load(f) for path in tpacking_data: runTexturePacker(sys.argv[1], *path)
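
# Example `texturePacking.json` layout this script expects (the paths and atlas
# names below are placeholders): a list of [inpath, outpath, name] triples,
# each unpacked as the arguments to runTexturePacker, e.g.
#
#   [
#       ["blocks", "world", "blocks"],
#       ["ui/icons", "ui", "icons"]
#   ]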
42
188
0.698413
acedcd0a3b4e59e33fcd0d6e78143e88138ff52e
13,400
py
Python
language/mentionmemory/tasks/ultra_fine_entity_typing_task_test.py
urikz/language
503aca178c98fed4c606cf83e58ae0f84012a4d9
[ "Apache-2.0" ]
null
null
null
language/mentionmemory/tasks/ultra_fine_entity_typing_task_test.py
urikz/language
503aca178c98fed4c606cf83e58ae0f84012a4d9
[ "Apache-2.0" ]
null
null
null
language/mentionmemory/tasks/ultra_fine_entity_typing_task_test.py
urikz/language
503aca178c98fed4c606cf83e58ae0f84012a4d9
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Ultra Fine Entity Typing model and evaluation functions.""" import copy import json import os from absl.testing import absltest from absl.testing import parameterized from flax.training import common_utils import jax import jax.numpy as jnp from language.mentionmemory.encoders import import_encoders # pylint: disable=unused-import from language.mentionmemory.tasks import ultra_fine_entity_typing_task from language.mentionmemory.utils import metric_utils from language.mentionmemory.utils import test_utils import ml_collections import numpy as np import tensorflow.compat.v2 as tf class UltraFineEntityTypingMetricsTest(test_utils.TestCase): """Test metrics processing and computations.""" def _get_test_data_path(self, file_name): path = os.path.join( 'language/mentionmemory/tasks/testdata/' 'ultra_fine_entity_typing', file_name) return path def assertNumpyArraysEqual(self, actual, expected): self.assertSequenceEqual(actual.tolist(), expected.tolist()) def setUp(self): super().setUp() # See https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html # for the dataset and files description. # Model predictions are downloaded from # http://nlp.cs.washington.edu/entity_type/model/best_model.tar.gz vocab_path = self._get_test_data_path('types.txt') with tf.io.gfile.GFile(vocab_path, 'r') as vocab_file: self.label_vocab = { label.strip(): index for index, label in enumerate(vocab_file) } data_path = self._get_test_data_path('dev.json') with tf.io.gfile.GFile(data_path, 'rb') as data_file: data = json.load(data_file) size = (len(data), len(self.label_vocab)) self.predictions = np.zeros(size, dtype=np.int16) self.labels = np.zeros(size, dtype=np.int16) for index, value in enumerate(data.values()): current_labels = np.array( [self.label_vocab[label] for label in value['gold']]) self.labels[index, current_labels] = 1 current_predictions = np.array( [self.label_vocab[label] for label in value['pred']]) self.predictions[index, current_predictions] = 1 def test_get_predictions(self): batch_size = 3 size = (batch_size, 13) logits = np.random.uniform(low=-1, high=1, size=size) expected_predictions = (logits > 0).astype(np.int32) actual_predictions = ultra_fine_entity_typing_task.get_predictions(logits) self.assertNumpyArraysEqual(actual_predictions, expected_predictions) all_negative_logits = -np.abs(logits) expected_predictions = np.zeros(size, dtype=np.int32) expected_predictions[np.arange(batch_size), np.argmax(all_negative_logits, axis=-1)] = 1 actual_predictions = ultra_fine_entity_typing_task.get_predictions( all_negative_logits) self.assertNumpyArraysEqual(actual_predictions, expected_predictions) @parameterized.parameters( (0, 3, 10, 1), (1, 3, 10, 3), (2, 3, 10, 10), (3, 3, 10, 0), (4, 10, 50, 1), (5, 10, 50, 10), (6, 10, 50, 50), (7, 10, 50, 0), ) def test_get_mrr(self, seed, batch_size, num_total_labels, num_correct_labels): np.random.seed(seed) logits = np.random.random((batch_size, 
num_total_labels)) labels = np.zeros((batch_size, num_total_labels)) for i in range(batch_size): correct_labels = np.random.choice( num_total_labels, size=(num_correct_labels,), replace=False) labels[i, correct_labels] = 1 actual_metrics = ultra_fine_entity_typing_task.get_mrr(labels, logits) expected_mrr, expected_denom = 0, 0 for i in range(batch_size): if labels[i].sum() == 0: continue expected_denom += 1 logits_labels_list = list(zip(logits[i], labels[i])) logits_labels_list.sort(reverse=True) current_mrr = [] for j in range(num_total_labels): if logits_labels_list[j][1] == 1: current_mrr.append(1 / (1 + j)) current_mrr = np.array(current_mrr) expected_mrr += current_mrr.mean() self.assertEqual(actual_metrics['denominator'], expected_denom) self.assertAlmostEqual(actual_metrics['value'], expected_mrr, places=4) @parameterized.parameters((1,), (2,), (10,)) def test_reproduce_paper_evals(self, num_chunks): """Reproduce results from https://www.aclweb.org/anthology/P18-1009.pdf.""" num_samples = self.labels.shape[0] chunk_size = num_samples // num_chunks metrics = [] for chunk_start in range(0, num_samples, chunk_size): chunk_end = min(chunk_start + chunk_size, num_samples) labels = self.labels[chunk_start:chunk_end] predictions = self.predictions[chunk_start:chunk_end] current_metrics = ultra_fine_entity_typing_task.get_prediction_recall_metrics( labels, predictions) current_metrics = jax.tree_map(lambda x: jnp.expand_dims(x, 0), current_metrics) metrics.append(current_metrics) metrics = common_utils.get_metrics(metrics) metrics_sum = jax.tree_map(jnp.sum, metrics) processed_metrics = metric_utils.process_metrics(metrics_sum) self.assertAlmostEqual( processed_metrics['total_precision_value'], 0.481, places=3) self.assertAlmostEqual( processed_metrics['total_recall_value'], 0.232, places=3) self.assertAlmostEqual( processed_metrics['coarse_grained_precision_value'], 0.603, places=3) self.assertAlmostEqual( processed_metrics['coarse_grained_recall_value'], 0.616, places=3) self.assertAlmostEqual( processed_metrics['fine_grained_precision_value'], 0.404, places=3) self.assertAlmostEqual( processed_metrics['fine_grained_recall_value'], 0.384, places=3) self.assertAlmostEqual( processed_metrics['ultra_fine_grained_precision_value'], 0.428, places=3) self.assertAlmostEqual( processed_metrics['ultra_fine_grained_recall_value'], 0.088, places=3) class UltraFineEntityTypingTaskTest(parameterized.TestCase): """Tests for UltraFineEntityTyping task.""" encoder_config = { 'dtype': 'bfloat16', 'vocab_size': 1000, 'entity_vocab_size': 1000, 'max_positions': 128, 'max_length': 128, 'hidden_size': 4, 'intermediate_dim': 8, 'entity_dim': 4, 'num_attention_heads': 2, 'num_initial_layers': 1, 'num_final_layers': 1, 'dropout_rate': 0.1, } model_config = { 'encoder_config': encoder_config, 'encoder_name': 'eae', 'dtype': 'bfloat16', } config = { 'model_config': model_config, 'seed': 0, } def assertArrayEqual(self, expected, actual): expected = expected.ravel().tolist() actual = actual.ravel().tolist() self.assertSequenceEqual(expected, actual) def _gen_raw_sample( self, config): """Generate raw example.""" features = {} # Generate text max_length = config.model_config.encoder_config.max_length features['text_ids'] = np.random.randint( low=1, high=config.model_config.encoder_config.vocab_size, size=(max_length), dtype=np.int64) features['text_mask'] = np.ones_like(features['text_ids']) # Generate labels num_classes = ultra_fine_entity_typing_task.NUM_CLASSES target = np.random.randint( num_classes, 
size=(config.max_num_labels_per_sample)) target_mask = np.random.randint(2, size=(config.max_num_labels_per_sample)) if target_mask.sum() == 0: # There should be at least one valid label target_mask[0] = 1 target = target * target_mask features['target'] = target features['target_mask'] = target_mask # Generate mentions mention_positions = np.random.choice( max_length, size=(2 * config.max_mentions_per_sample), replace=False) mention_positions.sort() mention_mask = np.random.randint( 2, size=(config.max_mentions_per_sample), dtype=np.int64) if mention_mask.sum() == 0: # There should be at least one valid label mention_mask[0] = 1 mention_start_positions = mention_positions[0::2] * mention_mask mention_end_positions = mention_positions[1::2] * mention_mask # Shuffle mentions p = np.random.permutation(config.max_mentions_per_sample) mention_start_positions = mention_start_positions[p] mention_end_positions = mention_end_positions[p] mention_mask = mention_mask[p] self.assertTrue(np.all(mention_start_positions[mention_mask == 0] == 0)) self.assertTrue(np.all(mention_end_positions[mention_mask == 0] == 0)) self.assertTrue(np.all(mention_mask[mention_mask == 0] == 0)) features['mention_start_positions'] = mention_start_positions features['mention_end_positions'] = mention_end_positions features['mention_mask'] = mention_mask features['mention_target_indices'] = np.random.choice( np.nonzero(mention_mask)[0], size=(1)) return features def _gen_raw_batch( self, config): samples = [ self._gen_raw_sample(config) for _ in range(config.per_device_batch_size) ] features = {} for feature_name in samples[0].keys(): features[feature_name] = np.stack( [sample[feature_name] for sample in samples]) return features @parameterized.parameters([ (2, 24, 10, 2), (5, 10, 1, 1), (5, 10, 1, 30), (5, 10, 10, 1), (10, 20, 10, 5), ]) def test_loss_fn(self, per_device_batch_size, max_mentions_per_sample, max_mentions, max_num_labels_per_sample): """Test loss function runs and produces expected values.""" config = copy.deepcopy(self.config) config['per_device_batch_size'] = per_device_batch_size config['max_mentions_per_sample'] = max_mentions_per_sample config['max_mentions'] = max_mentions config['max_num_labels_per_sample'] = max_num_labels_per_sample config = ml_collections.ConfigDict(config) raw_batch = self._gen_raw_batch(config) collater_fn = ultra_fine_entity_typing_task.UltraFineEntityTypingTask.make_collater_fn( config) postprocess_fn = ultra_fine_entity_typing_task.UltraFineEntityTypingTask.make_output_postprocess_fn( config) batch = collater_fn(raw_batch) batch = jax.tree_map(jnp.asarray, batch) self.assertSequenceEqual(batch['mention_target_weights'].shape, [config.per_device_batch_size]) self.assertSequenceEqual(batch['mention_target_batch_positions'].shape, [config.per_device_batch_size]) self.assertArrayEqual(batch['mention_target_batch_positions'], np.arange(config.per_device_batch_size)) self.assertSequenceEqual(raw_batch['mention_target_indices'].shape, [config.per_device_batch_size, 1]) expected_mention_target_indices = (np.arange(config.per_device_batch_size), raw_batch['mention_target_indices'][:, 0]) expected_mention_target_start_positions = raw_batch[ 'mention_start_positions'][expected_mention_target_indices] expected_mention_target_end_positions = raw_batch['mention_end_positions'][ expected_mention_target_indices] self.assertArrayEqual(batch['mention_target_start_positions'], expected_mention_target_start_positions) self.assertArrayEqual(batch['mention_target_end_positions'], 
expected_mention_target_end_positions) model = ultra_fine_entity_typing_task.UltraFineEntityTypingTask.build_model( config.model_config) dummy_input = ultra_fine_entity_typing_task.UltraFineEntityTypingTask.dummy_input( config) init_rng = jax.random.PRNGKey(0) initial_parameters = model.init(init_rng, dummy_input, True) loss_fn = ultra_fine_entity_typing_task.UltraFineEntityTypingTask.make_loss_fn( config) _, metrics, auxiliary_output = loss_fn(config.model_config, initial_parameters['params'], {}, batch, True) self.assertEqual(metrics['agg']['denominator'], config.per_device_batch_size) self.assertEqual(metrics['agg_mrr']['denominator'], config.per_device_batch_size) features = postprocess_fn(batch, auxiliary_output) # Check features are JSON-serializable json.dumps(features) # Check features match the original batch for key in batch.keys(): self.assertArrayEqual(np.array(features[key]), batch[key]) if __name__ == '__main__': absltest.main()
38.616715
104
0.691866
acedce4177ea9a74697eadc5e1407bf2c04348c0
3,164
py
Python
appengine/tictactoe/game.py
salendron/ai-kindergarten
02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e
[ "MIT" ]
null
null
null
appengine/tictactoe/game.py
salendron/ai-kindergarten
02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e
[ "MIT" ]
null
null
null
appengine/tictactoe/game.py
salendron/ai-kindergarten
02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e
[ "MIT" ]
null
null
null
import jwt from .jwt_secret import SECRET from copy import deepcopy import random class Game(object): def __init__(self): self.players = self.choose_players() self.current_player = 0 self.states = [] self.moves = [] def choose_players(self): players = ['h','m'] random.shuffle(players) return players def start(self): #generate empty board self.states.append( [-1,-1,-1,-1,-1,-1,-1,-1,-1] ) @classmethod def from_token(cls, token): game = Game() try: payload = jwt.decode(token,SECRET, algorithms=['HS256']) game.players = payload['players'] game.current_player = payload['current-player'] game.states = payload['states'] game.moves = payload['moves'] return game except Exception as ex: game.start() return game def to_token(self): payload = { 'players': self.players, 'current-player': self.current_player, 'states': self.states, 'moves': self.moves } return jwt.encode(payload, SECRET, algorithm='HS256').decode('unicode_escape') def _is_valid_move(self, move): try: move = int(move) except ValueError: return False if move < 0 or move > 8: return False if move not in self.get_valid_moves(): return False return True def get_valid_moves(self): current_state = self.states[-1] valid_moves = [] for i in range(9): if current_state[i] == -1: valid_moves.append(i) return valid_moves def play(self, move): if not self._is_valid_move(move): raise Exception("Invalid move!") move = int(move) new_state = deepcopy(self.states[-1]) new_state[move] = self.current_player self.states.append(new_state) self.moves.append(move) self.current_player = 1 if self.current_player == 0 else 0 def get_winner(self): current_state = self.states[-1] #check each player for p in range(2): # check each row for r in range(3): f1 = r * 3 if current_state[f1] == p and current_state[f1 + 1] == p and current_state[f1 + 2] == p: return p # check each column for c in range(3): if current_state[c] == p and current_state[c + 3] == p and current_state[c + 6] == p: return p # check diagonal if current_state[0] == p and current_state[4] == p and current_state[8] == p: return p if current_state[2] == p and current_state[4] == p and current_state[6] == p: return p # check for a tie moves_played = 0 for f in current_state: if f != -1: moves_played += 1 if moves_played == 9: return 2 return -1
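

# Illustrative usage sketch (comments only, since this module relies on the
# package-relative `jwt_secret` import and is meant to be imported, not run
# directly); the move-selection rule below is arbitrary:
#
#   game = Game()
#   game.start()
#   while game.get_winner() == -1:
#       game.play(game.get_valid_moves()[0])   # always take the first open cell
#   print(game.get_winner())   # 0 or 1 = winning player index, 2 = tie
#   token = game.to_token()    # round-trip later via Game.from_token(token)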
25.111111
104
0.521176
acedcf1115cdd54b51d4d7d8416f458d3837d6ef
19,523
py
Python
google/appengine/tools/appengine_rpc.py
micahstubbs/google_appengine
5db815221880602442928be44a43da22496e6827
[ "Apache-2.0" ]
null
null
null
google/appengine/tools/appengine_rpc.py
micahstubbs/google_appengine
5db815221880602442928be44a43da22496e6827
[ "Apache-2.0" ]
null
null
null
google/appengine/tools/appengine_rpc.py
micahstubbs/google_appengine
5db815221880602442928be44a43da22496e6827
[ "Apache-2.0" ]
2
2019-01-13T07:58:35.000Z
2020-11-04T04:55:35.000Z
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tool for performing authenticated RPCs against App Engine.""" import google import cookielib import cStringIO import fancy_urllib import gzip import hashlib import logging import os import re import socket import sys import time import urllib import urllib2 logger = logging.getLogger('google.appengine.tools.appengine_rpc') def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform): """Returns a 'User-agent' token for the host system platform. Args: os_module, sys_module, platform: Used for testing. Returns: String containing the platform token for the host system. """ if hasattr(sys_module, "getwindowsversion"): windows_version = sys_module.getwindowsversion() version_info = ".".join(str(i) for i in windows_version[:4]) return platform + "/" + version_info elif hasattr(os_module, "uname"): uname = os_module.uname() return "%s/%s" % (uname[0], uname[2]) else: return "unknown" def HttpRequestToString(req, include_data=True): """Converts a urllib2.Request to a string. Args: req: urllib2.Request Returns: Multi-line string representing the request. """ headers = "" for header in req.header_items(): headers += "%s: %s\n" % (header[0], header[1]) template = ("%(method)s %(selector)s %(type)s/1.1\n" "Host: %(host)s\n" "%(headers)s") if include_data: template = template + "\n%(data)s" return template % { 'method': req.get_method(), 'selector': req.get_selector(), 'type': req.get_type().upper(), 'host': req.get_host(), 'headers': headers, 'data': req.get_data(), } class ClientLoginError(urllib2.HTTPError): """Raised to indicate there was an error authenticating with ClientLogin.""" def __init__(self, url, code, msg, headers, args): urllib2.HTTPError.__init__(self, url, code, msg, headers, None) self.args = args self._reason = args.get("Error") self.info = args.get("Info") def read(self): return '%d %s: %s' % (self.code, self.msg, self.reason) @property def reason(self): return self._reason class AbstractRpcServer(object): """Provides a common interface for a simple RPC server.""" RUNTIME = "python" def __init__(self, host, auth_function, user_agent, source, host_override=None, extra_headers=None, save_cookies=False, auth_tries=3, account_type=None, debug_data=True, secure=True, ignore_certs=False, rpc_tries=3, options=None): """Creates a new HttpRpcServer. Args: host: The host to send requests to. auth_function: A function that takes no arguments and returns an (email, password) tuple when called. Will be called if authentication is required. user_agent: The user-agent string to send to the server. Specify None to omit the user-agent header. source: The source to specify in authentication requests. host_override: The host header to send to the server (defaults to host). extra_headers: A dict of extra headers to append to every request. Values supplied here will override other default headers that are supplied. save_cookies: If True, save the authentication cookies to local disk. 
If False, use an in-memory cookiejar instead. Subclasses must implement this functionality. Defaults to False. auth_tries: The number of times to attempt auth_function before failing. account_type: One of GOOGLE, HOSTED_OR_GOOGLE, or None for automatic. debug_data: Whether debugging output should include data contents. secure: If the requests sent using Send should be sent over HTTPS. ignore_certs: If the certificate mismatches should be ignored. rpc_tries: The number of rpc retries upon http server error (i.e. Response code >= 500 and < 600) before failing. options: the command line options (ignored in this implementation). """ if secure: self.scheme = "https" else: self.scheme = "http" self.ignore_certs = ignore_certs self.host = host self.host_override = host_override self.auth_function = auth_function self.source = source self.authenticated = False self.auth_tries = auth_tries self.debug_data = debug_data self.rpc_tries = rpc_tries self.account_type = account_type self.extra_headers = {} if user_agent: self.extra_headers["User-Agent"] = user_agent if extra_headers: self.extra_headers.update(extra_headers) self.save_cookies = save_cookies self.cookie_jar = cookielib.MozillaCookieJar() self.opener = self._GetOpener() if self.host_override: logger.debug("Server: %s; Host: %s", self.host, self.host_override) else: logger.debug("Server: %s", self.host) if ((self.host_override and self.host_override == "localhost") or self.host == "localhost" or self.host.startswith("localhost:")): self._DevAppServerAuthenticate() def _GetOpener(self): """Returns an OpenerDirector for making HTTP requests. Returns: A urllib2.OpenerDirector object. """ raise NotImplementedError def _CreateRequest(self, url, data=None): """Creates a new urllib request.""" req = fancy_urllib.FancyRequest(url, data=data) if self.host_override: req.add_header("Host", self.host_override) for key, value in self.extra_headers.iteritems(): req.add_header(key, value) return req def _GetAuthToken(self, email, password): """Uses ClientLogin to authenticate the user, returning an auth token. Args: email: The user's email address password: The user's password Raises: ClientLoginError: If there was an error authenticating with ClientLogin. HTTPError: If there was some other form of HTTP error. Returns: The authentication token returned by ClientLogin. """ account_type = self.account_type if not account_type: if (self.host.split(':')[0].endswith(".google.com") or (self.host_override and self.host_override.split(':')[0].endswith(".google.com"))): account_type = "HOSTED_OR_GOOGLE" else: account_type = "GOOGLE" data = { "Email": email, "Passwd": password, "service": "ah", "source": self.source, "accountType": account_type } req = self._CreateRequest( url=("https://%s/accounts/ClientLogin" % os.getenv("APPENGINE_AUTH_SERVER", "www.google.com")), data=urllib.urlencode(data)) try: response = self.opener.open(req) response_body = response.read() response_dict = dict(x.split("=") for x in response_body.split("\n") if x) if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1": self.extra_headers["Cookie"] = ( 'SID=%s; Path=/;' % response_dict["SID"]) return response_dict["Auth"] except urllib2.HTTPError, e: if e.code == 403: body = e.read() response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict) else: raise def _GetAuthCookie(self, auth_token): """Fetches authentication cookies for an authentication token. 
Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies. """ continue_location = "http://localhost/" args = {"continue": continue_location, "auth": auth_token} login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah") req = self._CreateRequest("%s://%s%s/login?%s" % (self.scheme, self.host, login_path, urllib.urlencode(args))) try: response = self.opener.open(req) except urllib2.HTTPError, e: response = e if (response.code != 302 or response.info()["location"] != continue_location): raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp) self.authenticated = True def _Authenticate(self): """Authenticates the user. The authentication process works as follows: 1) We get a username and password from the user 2) We use ClientLogin to obtain an AUTH token for the user (see http://code.google.com/apis/accounts/AuthForInstalledApps.html). 3) We pass the auth token to /_ah/login on the server to obtain an authentication cookie. If login was successful, it tries to redirect us to the URL we provided. If we attempt to access the upload API without first obtaining an authentication cookie, it returns a 401 response and directs us to authenticate ourselves with ClientLogin. """ for unused_i in range(self.auth_tries): credentials = self.auth_function() try: auth_token = self._GetAuthToken(credentials[0], credentials[1]) if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1": return except ClientLoginError, e: if e.reason == "CaptchaRequired": print >>sys.stderr, ( "Please go to\n" "https://www.google.com/accounts/DisplayUnlockCaptcha\n" "and verify you are a human. Then try again.") break if e.reason == "NotVerified": print >>sys.stderr, "Account not verified." break if e.reason == "TermsNotAgreed": print >>sys.stderr, "User has not agreed to TOS." break if e.reason == "AccountDeleted": print >>sys.stderr, "The user account has been deleted." break if e.reason == "AccountDisabled": print >>sys.stderr, "The user account has been disabled." break if e.reason == "ServiceDisabled": print >>sys.stderr, ("The user's access to the service has been " "disabled.") break if e.reason == "ServiceUnavailable": print >>sys.stderr, "The service is not available; try again later." break raise self._GetAuthCookie(auth_token) return @staticmethod def _CreateDevAppServerCookieData(email, admin): """Creates cookie payload data. Args: email: The user's email address. admin: True if the user is an admin; False otherwise. Returns: String containing the cookie payload. """ if email: user_id_digest = hashlib.md5(email.lower()).digest() user_id = "1" + "".join(["%02d" % ord(x) for x in user_id_digest])[:20] else: user_id = "" return "%s:%s:%s" % (email, bool(admin), user_id) def _DevAppServerAuthenticate(self): """Authenticates the user on the dev_appserver.""" credentials = self.auth_function() value = self._CreateDevAppServerCookieData(credentials[0], True) self.extra_headers["Cookie"] = ('dev_appserver_login="%s"; Path=/;' % value) def Send(self, request_path, payload="", content_type="application/octet-stream", timeout=None, **kwargs): """Sends an RPC and returns the response. Args: request_path: The path to send the request to, eg /api/appversion/create. payload: The body of the request, or None to send an empty request. content_type: The Content-Type header to use. timeout: timeout in seconds; default None i.e. no timeout. (Note: for large requests on OS X, the timeout doesn't work right.) 
kwargs: Any keyword arguments are converted into query string parameters. Returns: The response body, as a string. """ old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) try: tries = 0 auth_tried = False while True: tries += 1 url = "%s://%s%s" % (self.scheme, self.host, request_path) if kwargs: url += "?" + urllib.urlencode(sorted(kwargs.items())) req = self._CreateRequest(url=url, data=payload) req.add_header("Content-Type", content_type) req.add_header("X-appcfg-api-version", "1") try: logger.debug('Sending %s request:\n%s', self.scheme.upper(), HttpRequestToString(req, include_data=self.debug_data)) f = self.opener.open(req) response = f.read() f.close() return response except urllib2.HTTPError, e: logger.debug("Got http error, this is try %d: %s", tries, e) if tries > self.rpc_tries: raise elif e.code == 401: if auth_tried: raise auth_tried = True self._Authenticate() elif e.code >= 500 and e.code < 600: continue elif e.code == 302: if auth_tried: raise auth_tried = True loc = e.info()["location"] logger.debug("Got 302 redirect. Location: %s", loc) if loc.startswith("https://www.google.com/accounts/ServiceLogin"): self._Authenticate() elif re.match( r"https://www\.google\.com/a/[a-z0-9\.\-]+/ServiceLogin", loc): self.account_type = os.getenv("APPENGINE_RPC_HOSTED_LOGIN_TYPE", "HOSTED") self._Authenticate() elif loc.startswith("http://%s/_ah/login" % (self.host,)): self._DevAppServerAuthenticate() else: raise else: raise finally: socket.setdefaulttimeout(old_timeout) class ContentEncodingHandler(urllib2.BaseHandler): """Request and handle HTTP Content-Encoding.""" def http_request(self, request): request.add_header("Accept-Encoding", "gzip") for header in request.headers: if header.lower() == "user-agent": request.headers[header] += " gzip" return request https_request = http_request def http_response(self, req, resp): """Handle encodings in the order that they are encountered.""" encodings = [] headers = resp.headers encoding_header = None for header in headers: if header.lower() == "content-encoding": encoding_header = header for encoding in headers[header].split(","): encoding = encoding.strip() if encoding: encodings.append(encoding) break if not encodings: return resp del headers[encoding_header] fp = resp while encodings and encodings[-1].lower() == "gzip": fp = cStringIO.StringIO(fp.read()) fp = gzip.GzipFile(fileobj=fp, mode="r") encodings.pop() if encodings: headers[encoding_header] = ", ".join(encodings) logger.warning("Unrecognized Content-Encoding: %s", encodings[-1]) msg = resp.msg if sys.version_info >= (2, 6): resp = urllib2.addinfourl(fp, headers, resp.url, resp.code) else: response_code = resp.code resp = urllib2.addinfourl(fp, headers, resp.url) resp.code = response_code resp.msg = msg return resp https_response = http_response class HttpRpcServer(AbstractRpcServer): """Provides a simplified RPC-style interface for HTTP requests.""" DEFAULT_COOKIE_FILE_PATH = "~/.appcfg_cookies" def __init__(self, *args, **kwargs): self.certpath = os.path.normpath(os.path.join( os.path.dirname(__file__), '..', '..', '..', 'lib', 'cacerts', 'cacerts.txt')) self.cert_file_available = ((not kwargs.get("ignore_certs", False)) and os.path.exists(self.certpath)) super(HttpRpcServer, self).__init__(*args, **kwargs) def _CreateRequest(self, url, data=None): """Creates a new urllib request.""" req = super(HttpRpcServer, self)._CreateRequest(url, data) if self.cert_file_available and fancy_urllib.can_validate_certs(): req.set_ssl_info(ca_certs=self.certpath) return req 
def _CheckCookie(self): """Warn if cookie is not valid for at least one minute.""" min_expire = time.time() + 60 for cookie in self.cookie_jar: if cookie.domain == self.host and not cookie.is_expired(min_expire): break else: print >>sys.stderr, "\nError: Machine system clock is incorrect.\n" def _Authenticate(self): """Save the cookie jar after authentication.""" if self.cert_file_available and not fancy_urllib.can_validate_certs(): logger.warn("""ssl module not found. Without the ssl module, the identity of the remote host cannot be verified, and connections may NOT be secure. To fix this, please install the ssl module from http://pypi.python.org/pypi/ssl . To learn more, see https://developers.google.com/appengine/kb/general#rpcssl""") super(HttpRpcServer, self)._Authenticate() if self.cookie_jar.filename is not None and self.save_cookies: logger.debug("Saving authentication cookies to %s", self.cookie_jar.filename) self.cookie_jar.save() self._CheckCookie() def _GetOpener(self): """Returns an OpenerDirector that supports cookies and ignores redirects. Returns: A urllib2.OpenerDirector object. """ opener = urllib2.OpenerDirector() opener.add_handler(fancy_urllib.FancyProxyHandler()) opener.add_handler(urllib2.UnknownHandler()) opener.add_handler(urllib2.HTTPHandler()) opener.add_handler(urllib2.HTTPDefaultErrorHandler()) opener.add_handler(fancy_urllib.FancyHTTPSHandler()) opener.add_handler(urllib2.HTTPErrorProcessor()) opener.add_handler(ContentEncodingHandler()) if self.save_cookies: self.cookie_jar.filename = os.path.expanduser( HttpRpcServer.DEFAULT_COOKIE_FILE_PATH) if os.path.exists(self.cookie_jar.filename): try: self.cookie_jar.load() self.authenticated = True logger.debug("Loaded authentication cookies from %s", self.cookie_jar.filename) except (OSError, IOError, cookielib.LoadError), e: logger.debug("Could not load authentication cookies; %s: %s", e.__class__.__name__, e) self.cookie_jar.filename = None else: try: fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600) os.close(fd) except (OSError, IOError), e: logger.debug("Could not create authentication cookies file; %s: %s", e.__class__.__name__, e) self.cookie_jar.filename = None opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) return opener
32.216172
80
0.640783
acedd0948a1e0e5463556798206a731eeeb705e5
2,963
py
Python
humpday/comparison/timing.py
MDCHAMP/humpday
45e2cea95ae951d991ebc6c1e98314cc8c726f25
[ "MIT" ]
53
2021-02-13T01:17:02.000Z
2022-03-16T10:07:29.000Z
humpday/comparison/timing.py
MDCHAMP/humpday
45e2cea95ae951d991ebc6c1e98314cc8c726f25
[ "MIT" ]
16
2021-02-13T17:42:06.000Z
2022-03-06T10:08:50.000Z
humpday/comparison/timing.py
MDCHAMP/humpday
45e2cea95ae951d991ebc6c1e98314cc8c726f25
[ "MIT" ]
12
2020-12-09T03:16:22.000Z
2022-02-23T09:34:00.000Z
from humpday.objectives.allobjectives import A_CLASSIC_OBJECTIVE from humpday.optimizers.alloptimizers import OPTIMIZERS from humpday.comparison.limitations import max_n_dim import json from getjson import getjson import time import warnings from pprint import pprint import traceback import sys import random # Generate lookup of optimizer times. This can take a while. def get_timing(): return getjson('https://raw.githubusercontent.com/microprediction/humpday/main/humpday/comparison/timing.json') def exclude(name,n_dim,n_trials): return 'shgo' in name and n_dim>13 def create_timing(max_elapsed=5*60): """ Add to the timing JSON file """ try: with open('timing.json', 'rt') as fp: cpu = json.load(fp=fp) except Exception: cpu = dict() for n_dim in [2, 3, 5, 8]: print(' ') print('*********** Dimension '+str(n_dim)+' ************') print(' ') for opt in OPTIMIZERS: print(opt.__name__) dim_okay = n_dim <= max_n_dim(opt.__name__) if not cpu.get(opt.__name__): cpu[opt.__name__] = dict() n_failures = 0 elapsed = -1 if not cpu[opt.__name__].get(n_dim): cpu[opt.__name__][n_dim]=dict() for j,n_trials in enumerate([20,30,50,80,130,210,340,550]): print(' '*j+str(n_trials)) if exclude(opt.__name__, n_dim, n_trials): cpu[opt.__name__][n_dim][n_trials] = -1 if not cpu[opt.__name__][n_dim].get(n_trials): tck = time.time() try: best_val,best_x = opt(objective=A_CLASSIC_OBJECTIVE,n_trials=n_trials,n_dim=n_dim) elapsed = time.time()-tck cpu[opt.__name__][n_dim][n_trials] = elapsed if elapsed>60: pprint({'name':opt.__name__,'n_dim':n_dim,'n_trials':n_trials,"elapsed":elapsed}) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() warnings.warn(opt.__name__+' failed ') pprint({"name":opt.__name__,"n_dim":n_dim,"n_trials":n_trials}) traceback.print_tb(tb=exc_traceback) cpu[opt.__name__][n_dim][n_trials] = -1 n_failures+=1 if n_failures>=1: break if elapsed>max_elapsed: break if n_failures >= 1: break if elapsed > 60 * 10: break with open('timing.json','wt') as fp: json.dump(cpu,fp=fp) if __name__=='__main__': create_timing()
35.27381
115
0.517719
acedd2a84b25d0e313700c6cc0b481b9c409a545
498
py
Python
databases/migrations/2021_12_07_145520_CPU.py
knguyen111601/test_penguin_project_4_backend
345551beffd56f835bdfe6d2b49ca0523e49a2f3
[ "MIT" ]
null
null
null
databases/migrations/2021_12_07_145520_CPU.py
knguyen111601/test_penguin_project_4_backend
345551beffd56f835bdfe6d2b49ca0523e49a2f3
[ "MIT" ]
null
null
null
databases/migrations/2021_12_07_145520_CPU.py
knguyen111601/test_penguin_project_4_backend
345551beffd56f835bdfe6d2b49ca0523e49a2f3
[ "MIT" ]
null
null
null
"""CPU Migration.""" from masoniteorm.migrations import Migration class CPU(Migration): def up(self): """ Run the migrations. """ with self.schema.create("cpus") as table: table.increments("id") table.string("cpu_name") table.integer("cpu_price") table.string("cpu_img") table.timestamps() def down(self): """ Revert the migrations. """ self.schema.drop("cpus")
21.652174
49
0.528112
acedd2fdb86e4cd93cb34bd1bf03959606ad4596
3,128
py
Python
lib/spack/spack/compilers/xl.py
SimeonEhrig/spack
7fe0230492ecf0e497a84d578ea163570cf460eb
[ "ECL-2.0", "Apache-2.0", "MIT" ]
2
2016-01-12T20:14:40.000Z
2017-06-16T07:03:46.000Z
lib/spack/spack/compilers/xl.py
SimeonEhrig/spack
7fe0230492ecf0e497a84d578ea163570cf460eb
[ "ECL-2.0", "Apache-2.0", "MIT" ]
75
2016-07-27T11:43:00.000Z
2020-12-08T15:56:53.000Z
lib/spack/spack/compilers/xl.py
SimeonEhrig/spack
7fe0230492ecf0e497a84d578ea163570cf460eb
[ "ECL-2.0", "Apache-2.0", "MIT" ]
8
2015-10-16T13:51:49.000Z
2021-10-18T13:58:03.000Z
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.compiler import Compiler, UnsupportedCompilerFlag
from spack.version import ver


class Xl(Compiler):
    # Subclasses use possible names of C compiler
    cc_names = ['xlc']

    # Subclasses use possible names of C++ compiler
    cxx_names = ['xlC', 'xlc++']

    # Subclasses use possible names of Fortran 77 compiler
    f77_names = ['xlf']

    # Subclasses use possible names of Fortran 90 compiler
    fc_names = ['xlf90', 'xlf95', 'xlf2003', 'xlf2008']

    # Named wrapper links within build_env_path
    link_paths = {'cc': 'xl/xlc',
                  'cxx': 'xl/xlc++',
                  'f77': 'xl/xlf',
                  'fc': 'xl/xlf90'}

    version_argument = '-qversion'
    version_regex = r'([0-9]?[0-9]\.[0-9])'

    @property
    def openmp_flag(self):
        return "-qsmp=omp"

    @property
    def cxx11_flag(self):
        if self.version < ver('13.1'):
            raise UnsupportedCompilerFlag(self,
                                          "the C++11 standard",
                                          "cxx11_flag",
                                          "< 13.1")
        else:
            return "-qlanglvl=extended0x"

    @property
    def pic_flag(self):
        return "-qpic"

    @property
    def fflags(self):
        # The -qzerosize flag is effective only for the Fortran 77
        # compilers and allows the use of zero size objects.
        # For Fortran 90 and beyond, it is set by default and has no impact.
        # Its use has no negative side effects.
        return "-qzerosize"

    @classmethod
    def fc_version(cls, fc):
        # The fortran and C/C++ versions of the XL compiler are always
        # two units apart. By this we mean that the fortran release that
        # goes with XL C/C++ 11.1 is 13.1. Having such a difference in
        # version number is confusing spack quite a lot. Most notably
        # if you keep the versions as is the default xl compiler will
        # only have fortran and no C/C++. So we associate the Fortran
        # compiler with the version associated to the C/C++ compiler.
        # One last stumble. Version numbers over 10 have at least a .1,
        # those under 10 a .0. There is no xlf 9.x or under currently
        # available. BG/P and BG/L can have such a compiler mix, and possibly
        # older versions of AIX and linux on power.
        fortran_version = cls.default_version(fc)
        if fortran_version >= 16:
            # Starting with version 16.1, the XL C and Fortran compilers
            # have the same version. So no need to downgrade the Fortran
            # compiler version to match that of the C compiler version.
            return str(fortran_version)
        c_version = float(fortran_version) - 2
        if c_version < 10:
            c_version = c_version - 0.1
        return str(c_version)

    @classmethod
    def f77_version(cls, f77):
        return cls.fc_version(f77)
36.8
77
0.605818
acedd389b3267c92639c8ee1c1f06e0c53e30bc8
25,288
py
Python
src/_pytest/doctest.py
JarnoRFB/pytest
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
[ "MIT" ]
1
2020-09-19T06:23:11.000Z
2020-09-19T06:23:11.000Z
src/_pytest/doctest.py
JarnoRFB/pytest
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
[ "MIT" ]
null
null
null
src/_pytest/doctest.py
JarnoRFB/pytest
44cd8a3a86354b2b686d0b64f2ac328aca574bc7
[ "MIT" ]
null
null
null
"""Discover and run doctests in modules and test files.""" import bdb import inspect import platform import sys import traceback import types import warnings from contextlib import contextmanager from typing import Any from typing import Callable from typing import Dict from typing import Generator from typing import Iterable from typing import List from typing import Optional from typing import Pattern from typing import Sequence from typing import Tuple from typing import Union import py.path import pytest from _pytest import outcomes from _pytest._code.code import ExceptionInfo from _pytest._code.code import ReprFileLocation from _pytest._code.code import TerminalRepr from _pytest._io import TerminalWriter from _pytest.compat import safe_getattr from _pytest.compat import TYPE_CHECKING from _pytest.config import Config from _pytest.config.argparsing import Parser from _pytest.fixtures import FixtureRequest from _pytest.outcomes import OutcomeException from _pytest.pathlib import import_path from _pytest.python_api import approx from _pytest.warning_types import PytestWarning if TYPE_CHECKING: import doctest from typing import Type DOCTEST_REPORT_CHOICE_NONE = "none" DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" DOCTEST_REPORT_CHOICE_UDIFF = "udiff" DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" DOCTEST_REPORT_CHOICES = ( DOCTEST_REPORT_CHOICE_NONE, DOCTEST_REPORT_CHOICE_CDIFF, DOCTEST_REPORT_CHOICE_NDIFF, DOCTEST_REPORT_CHOICE_UDIFF, DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, ) # Lazy definition of runner class RUNNER_CLASS = None # Lazy definition of output checker class CHECKER_CLASS = None # type: Optional[Type[doctest.OutputChecker]] def pytest_addoption(parser: Parser) -> None: parser.addini( "doctest_optionflags", "option flags for doctests", type="args", default=["ELLIPSIS"], ) parser.addini( "doctest_encoding", "encoding used for doctest files", default="utf-8" ) group = parser.getgroup("collect") group.addoption( "--doctest-modules", action="store_true", default=False, help="run doctests in all .py modules", dest="doctestmodules", ) group.addoption( "--doctest-report", type=str.lower, default="udiff", help="choose another output format for diffs on doctest failure", choices=DOCTEST_REPORT_CHOICES, dest="doctestreport", ) group.addoption( "--doctest-glob", action="append", default=[], metavar="pat", help="doctests file matching pattern, default: test*.txt", dest="doctestglob", ) group.addoption( "--doctest-ignore-import-errors", action="store_true", default=False, help="ignore doctest ImportErrors", dest="doctest_ignore_import_errors", ) group.addoption( "--doctest-continue-on-failure", action="store_true", default=False, help="for a given doctest, continue to run after the first failure", dest="doctest_continue_on_failure", ) def pytest_unconfigure() -> None: global RUNNER_CLASS RUNNER_CLASS = None def pytest_collect_file( path: py.path.local, parent ) -> Optional[Union["DoctestModule", "DoctestTextfile"]]: config = parent.config if path.ext == ".py": if config.option.doctestmodules and not _is_setup_py(path): mod = DoctestModule.from_parent(parent, fspath=path) # type: DoctestModule return mod elif _is_doctest(config, path, parent): txt = DoctestTextfile.from_parent(parent, fspath=path) # type: DoctestTextfile return txt return None def _is_setup_py(path: py.path.local) -> bool: if path.basename != "setup.py": return False contents = path.read_binary() return b"setuptools" in contents or b"distutils" in contents def 
_is_doctest(config: Config, path: py.path.local, parent) -> bool: if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): return True globs = config.getoption("doctestglob") or ["test*.txt"] for glob in globs: if path.check(fnmatch=glob): return True return False class ReprFailDoctest(TerminalRepr): def __init__( self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]] ) -> None: self.reprlocation_lines = reprlocation_lines def toterminal(self, tw: TerminalWriter) -> None: for reprlocation, lines in self.reprlocation_lines: for line in lines: tw.line(line) reprlocation.toterminal(tw) class MultipleDoctestFailures(Exception): def __init__(self, failures: "Sequence[doctest.DocTestFailure]") -> None: super().__init__() self.failures = failures def _init_runner_class() -> "Type[doctest.DocTestRunner]": import doctest class PytestDoctestRunner(doctest.DebugRunner): """Runner to collect failures. Note that the out variable in this case is a list instead of a stdout-like object. """ def __init__( self, checker: Optional[doctest.OutputChecker] = None, verbose: Optional[bool] = None, optionflags: int = 0, continue_on_failure: bool = True, ) -> None: doctest.DebugRunner.__init__( self, checker=checker, verbose=verbose, optionflags=optionflags ) self.continue_on_failure = continue_on_failure def report_failure( self, out, test: "doctest.DocTest", example: "doctest.Example", got: str, ) -> None: failure = doctest.DocTestFailure(test, example, got) if self.continue_on_failure: out.append(failure) else: raise failure def report_unexpected_exception( self, out, test: "doctest.DocTest", example: "doctest.Example", exc_info: "Tuple[Type[BaseException], BaseException, types.TracebackType]", ) -> None: if isinstance(exc_info[1], OutcomeException): raise exc_info[1] if isinstance(exc_info[1], bdb.BdbQuit): outcomes.exit("Quitting debugger") failure = doctest.UnexpectedException(test, example, exc_info) if self.continue_on_failure: out.append(failure) else: raise failure return PytestDoctestRunner def _get_runner( checker: Optional["doctest.OutputChecker"] = None, verbose: Optional[bool] = None, optionflags: int = 0, continue_on_failure: bool = True, ) -> "doctest.DocTestRunner": # We need this in order to do a lazy import on doctest global RUNNER_CLASS if RUNNER_CLASS is None: RUNNER_CLASS = _init_runner_class() # Type ignored because the continue_on_failure argument is only defined on # PytestDoctestRunner, which is lazily defined so can't be used as a type. 
return RUNNER_CLASS( # type: ignore checker=checker, verbose=verbose, optionflags=optionflags, continue_on_failure=continue_on_failure, ) class DoctestItem(pytest.Item): def __init__( self, name: str, parent: "Union[DoctestTextfile, DoctestModule]", runner: Optional["doctest.DocTestRunner"] = None, dtest: Optional["doctest.DocTest"] = None, ) -> None: super().__init__(name, parent) self.runner = runner self.dtest = dtest self.obj = None self.fixture_request = None # type: Optional[FixtureRequest] @classmethod def from_parent( # type: ignore cls, parent: "Union[DoctestTextfile, DoctestModule]", *, name: str, runner: "doctest.DocTestRunner", dtest: "doctest.DocTest" ): # incompatible signature due to to imposed limits on sublcass """The public named constructor.""" return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) def setup(self) -> None: if self.dtest is not None: self.fixture_request = _setup_fixtures(self) globs = dict(getfixture=self.fixture_request.getfixturevalue) for name, value in self.fixture_request.getfixturevalue( "doctest_namespace" ).items(): globs[name] = value self.dtest.globs.update(globs) def runtest(self) -> None: assert self.dtest is not None assert self.runner is not None _check_all_skipped(self.dtest) self._disable_output_capturing_for_darwin() failures = [] # type: List[doctest.DocTestFailure] # Type ignored because we change the type of `out` from what # doctest expects. self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] if failures: raise MultipleDoctestFailures(failures) def _disable_output_capturing_for_darwin(self) -> None: """Disable output capturing. Otherwise, stdout is lost to doctest (#985).""" if platform.system() != "Darwin": return capman = self.config.pluginmanager.getplugin("capturemanager") if capman: capman.suspend_global_capture(in_=True) out, err = capman.read_global_capture() sys.stdout.write(out) sys.stderr.write(err) # TODO: Type ignored -- breaks Liskov Substitution. def repr_failure( # type: ignore[override] self, excinfo: ExceptionInfo[BaseException], ) -> Union[str, TerminalRepr]: import doctest failures = ( None ) # type: Optional[Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]] if isinstance( excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) ): failures = [excinfo.value] elif isinstance(excinfo.value, MultipleDoctestFailures): failures = excinfo.value.failures if failures is not None: reprlocation_lines = [] for failure in failures: example = failure.example test = failure.test filename = test.filename if test.lineno is None: lineno = None else: lineno = test.lineno + example.lineno + 1 message = type(failure).__name__ # TODO: ReprFileLocation doesn't expect a None lineno. reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] checker = _get_checker() report_choice = _get_report_choice( self.config.getoption("doctestreport") ) if lineno is not None: assert failure.test.docstring is not None lines = failure.test.docstring.splitlines(False) # add line numbers to the left of the error message assert test.lineno is not None lines = [ "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) ] # trim docstring error lines to 10 lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] else: lines = [ "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" ] indent = ">>>" for line in example.source.splitlines(): lines.append("??? {} {}".format(indent, line)) indent = "..." 
if isinstance(failure, doctest.DocTestFailure): lines += checker.output_difference( example, failure.got, report_choice ).split("\n") else: inner_excinfo = ExceptionInfo(failure.exc_info) lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] lines += [ x.strip("\n") for x in traceback.format_exception(*failure.exc_info) ] reprlocation_lines.append((reprlocation, lines)) return ReprFailDoctest(reprlocation_lines) else: return super().repr_failure(excinfo) def reportinfo(self): assert self.dtest is not None return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name def _get_flag_lookup() -> Dict[str, int]: import doctest return dict( DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, ELLIPSIS=doctest.ELLIPSIS, IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, ALLOW_UNICODE=_get_allow_unicode_flag(), ALLOW_BYTES=_get_allow_bytes_flag(), NUMBER=_get_number_flag(), ) def get_optionflags(parent): optionflags_str = parent.config.getini("doctest_optionflags") flag_lookup_table = _get_flag_lookup() flag_acc = 0 for flag in optionflags_str: flag_acc |= flag_lookup_table[flag] return flag_acc def _get_continue_on_failure(config): continue_on_failure = config.getvalue("doctest_continue_on_failure") if continue_on_failure: # We need to turn off this if we use pdb since we should stop at # the first failure. if config.getvalue("usepdb"): continue_on_failure = False return continue_on_failure class DoctestTextfile(pytest.Module): obj = None def collect(self) -> Iterable[DoctestItem]: import doctest # Inspired by doctest.testfile; ideally we would use it directly, # but it doesn't support passing a custom checker. encoding = self.config.getini("doctest_encoding") text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename globs = {"__name__": "__main__"} optionflags = get_optionflags(self) runner = _get_runner( verbose=False, optionflags=optionflags, checker=_get_checker(), continue_on_failure=_get_continue_on_failure(self.config), ) parser = doctest.DocTestParser() test = parser.get_doctest(text, globs, name, filename, 0) if test.examples: yield DoctestItem.from_parent( self, name=test.name, runner=runner, dtest=test ) def _check_all_skipped(test: "doctest.DocTest") -> None: """Raise pytest.skip() if all examples in the given DocTest have the SKIP option set.""" import doctest all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) if all_skipped: pytest.skip("all tests skipped by +SKIP option") def _is_mocked(obj: object) -> bool: """Return if an object is possibly a mock object by checking the existence of a highly improbable attribute.""" return ( safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) is not None ) @contextmanager def _patch_unwrap_mock_aware() -> Generator[None, None, None]: """Context manager which replaces ``inspect.unwrap`` with a version that's aware of mock objects and doesn't recurse into them.""" real_unwrap = inspect.unwrap def _mock_aware_unwrap( func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None ) -> Any: try: if stop is None or stop is _is_mocked: return real_unwrap(func, stop=_is_mocked) _stop = stop return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) except Exception as e: warnings.warn( "Got %r when unwrapping %r. 
This is usually caused " "by a violation of Python's object protocol; see e.g. " "https://github.com/pytest-dev/pytest/issues/5080" % (e, func), PytestWarning, ) raise inspect.unwrap = _mock_aware_unwrap try: yield finally: inspect.unwrap = real_unwrap class DoctestModule(pytest.Module): def collect(self) -> Iterable[DoctestItem]: import doctest class MockAwareDocTestFinder(doctest.DocTestFinder): """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug. https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532 """ def _find_lineno(self, obj, source_lines): """Doctest code does not take into account `@property`, this is a hackish way to fix it. https://bugs.python.org/issue17446 """ if isinstance(obj, property): obj = getattr(obj, "fget", obj) # Type ignored because this is a private function. return doctest.DocTestFinder._find_lineno( # type: ignore self, obj, source_lines, ) def _find( self, tests, obj, name, module, source_lines, globs, seen ) -> None: if _is_mocked(obj): return with _patch_unwrap_mock_aware(): # Type ignored because this is a private function. doctest.DocTestFinder._find( # type: ignore self, tests, obj, name, module, source_lines, globs, seen ) if self.fspath.basename == "conftest.py": module = self.config.pluginmanager._importconftest( self.fspath, self.config.getoption("importmode") ) else: try: module = import_path(self.fspath) except ImportError: if self.config.getvalue("doctest_ignore_import_errors"): pytest.skip("unable to import module %r" % self.fspath) else: raise # Uses internal doctest module parsing mechanism. finder = MockAwareDocTestFinder() optionflags = get_optionflags(self) runner = _get_runner( verbose=False, optionflags=optionflags, checker=_get_checker(), continue_on_failure=_get_continue_on_failure(self.config), ) for test in finder.find(module, module.__name__): if test.examples: # skip empty doctests yield DoctestItem.from_parent( self, name=test.name, runner=runner, dtest=test ) def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest: """Used by DoctestTextfile and DoctestItem to setup fixture information.""" def func() -> None: pass doctest_item.funcargs = {} # type: ignore[attr-defined] fm = doctest_item.session._fixturemanager doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined] node=doctest_item, func=func, cls=None, funcargs=False ) fixture_request = FixtureRequest(doctest_item) fixture_request._fillfixtures() return fixture_request def _init_checker_class() -> "Type[doctest.OutputChecker]": import doctest import re class LiteralsOutputChecker(doctest.OutputChecker): # Based on doctest_nose_plugin.py from the nltk project # (https://github.com/nltk/nltk) and on the "numtest" doctest extension # by Sebastien Boisgerault (https://github.com/boisgera/numtest). _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) _number_re = re.compile( r""" (?P<number> (?P<mantissa> (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+) | (?P<integer2> [+-]?\d+)\. ) (?: [Ee] (?P<exponent1> [+-]?\d+) )? 
| (?P<integer3> [+-]?\d+) (?: [Ee] (?P<exponent2> [+-]?\d+) ) ) """, re.VERBOSE, ) def check_output(self, want: str, got: str, optionflags: int) -> bool: if doctest.OutputChecker.check_output(self, want, got, optionflags): return True allow_unicode = optionflags & _get_allow_unicode_flag() allow_bytes = optionflags & _get_allow_bytes_flag() allow_number = optionflags & _get_number_flag() if not allow_unicode and not allow_bytes and not allow_number: return False def remove_prefixes(regex: Pattern[str], txt: str) -> str: return re.sub(regex, r"\1\2", txt) if allow_unicode: want = remove_prefixes(self._unicode_literal_re, want) got = remove_prefixes(self._unicode_literal_re, got) if allow_bytes: want = remove_prefixes(self._bytes_literal_re, want) got = remove_prefixes(self._bytes_literal_re, got) if allow_number: got = self._remove_unwanted_precision(want, got) return doctest.OutputChecker.check_output(self, want, got, optionflags) def _remove_unwanted_precision(self, want: str, got: str) -> str: wants = list(self._number_re.finditer(want)) gots = list(self._number_re.finditer(got)) if len(wants) != len(gots): return got offset = 0 for w, g in zip(wants, gots): fraction = w.group("fraction") # type: Optional[str] exponent = w.group("exponent1") # type: Optional[str] if exponent is None: exponent = w.group("exponent2") if fraction is None: precision = 0 else: precision = len(fraction) if exponent is not None: precision -= int(exponent) if float(w.group()) == approx(float(g.group()), abs=10 ** -precision): # They're close enough. Replace the text we actually # got with the text we want, so that it will match when we # check the string literally. got = ( got[: g.start() + offset] + w.group() + got[g.end() + offset :] ) offset += w.end() - w.start() - (g.end() - g.start()) return got return LiteralsOutputChecker def _get_checker() -> "doctest.OutputChecker": """Return a doctest.OutputChecker subclass that supports some additional options: * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b'' prefixes (respectively) in string literals. Useful when the same doctest should run in Python 2 and Python 3. * NUMBER to ignore floating-point differences smaller than the precision of the literal number in the doctest. An inner class is used to avoid importing "doctest" at the module level. """ global CHECKER_CLASS if CHECKER_CLASS is None: CHECKER_CLASS = _init_checker_class() return CHECKER_CLASS() def _get_allow_unicode_flag() -> int: """Register and return the ALLOW_UNICODE flag.""" import doctest return doctest.register_optionflag("ALLOW_UNICODE") def _get_allow_bytes_flag() -> int: """Register and return the ALLOW_BYTES flag.""" import doctest return doctest.register_optionflag("ALLOW_BYTES") def _get_number_flag() -> int: """Register and return the NUMBER flag.""" import doctest return doctest.register_optionflag("NUMBER") def _get_report_choice(key: str) -> int: """Return the actual `doctest` module flag value. We want to do it as late as possible to avoid importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. 
""" import doctest return { DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, DOCTEST_REPORT_CHOICE_NONE: 0, }[key] @pytest.fixture(scope="session") def doctest_namespace() -> Dict[str, Any]: """Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.""" return dict()
34.928177
100
0.610487
acedd3f010e9c79178bb812130d5103b4b41271a
1,681
py
Python
py4cytoscape/py4cytoscape_logger_settings.py
specter119/py4cytoscape
11f968a8ab6518354406c9ed8321f331355b54f0
[ "MIT" ]
21
2020-09-01T14:31:11.000Z
2022-03-08T01:16:35.000Z
py4cytoscape/py4cytoscape_logger_settings.py
specter119/py4cytoscape
11f968a8ab6518354406c9ed8321f331355b54f0
[ "MIT" ]
50
2020-08-21T00:45:46.000Z
2022-03-20T21:38:37.000Z
py4cytoscape/py4cytoscape_logger_settings.py
specter119/py4cytoscape
11f968a8ab6518354406c9ed8321f331355b54f0
[ "MIT" ]
3
2021-01-14T08:30:31.000Z
2021-08-04T07:58:17.000Z
# -*- coding: utf-8 -*- """Logging configuration values that can be set by a user. """ """Copyright 2020 The Cytoscape Consortium Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # Log level choices are here: https://docs.python.org/3/howto/logging.html#logging-levels # print(f'Starting {__name__} module') _SUMMARY_LOG_LEVEL = 'INFO' # 'DEBUG' to turn on, 'NOTSET' to turn off, 'INFO' to turn on just HTTP calls _SUMMARY_ENABLE_HTTP_CALLS = True _SUMMARY_ENABLE_HTTP_CONTENT = False _DETAIL_LOG_LEVEL = 'DEBUG' # 'DEBUG' to turn on, 'NOTSET' to turn off _DETAIL_ENABLE_HTTP_CALLS = True _DETAIL_ENABLE_HTTP_CONTENT = True _DETAIL_LOG_DIR = 'logs' _DETAIL_LOG_NAME = 'py4cytoscape.log'
45.432432
120
0.780488
acedd4b4bde384016734258951eedb66c61ed6fb
9,859
py
Python
ax/modelbridge/tests/test_torch_modelbridge.py
viotemp1/Ax
87e2f33a02c745b40b2ea1332e649f298347f35d
[ "MIT" ]
1
2022-01-04T12:20:51.000Z
2022-01-04T12:20:51.000Z
ax/modelbridge/tests/test_torch_modelbridge.py
viotemp1/Ax
87e2f33a02c745b40b2ea1332e649f298347f35d
[ "MIT" ]
null
null
null
ax/modelbridge/tests/test_torch_modelbridge.py
viotemp1/Ax
87e2f33a02c745b40b2ea1332e649f298347f35d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from unittest import mock import numpy as np import torch from ax.core.observation import ObservationFeatures from ax.modelbridge.array import ArrayModelBridge from ax.modelbridge.torch import TorchModelBridge from ax.modelbridge.transforms.base import Transform from ax.models.torch_base import TorchModel from ax.utils.common.testutils import TestCase class TorchModelBridgeTest(TestCase): @mock.patch( f"{ArrayModelBridge.__module__}.ArrayModelBridge.__init__", autospec=True, return_value=None, ) def testTorchModelBridge(self, mock_init): torch_dtype = torch.float64 torch_device = torch.device("cpu") ma = TorchModelBridge( experiment=None, search_space=None, data=None, model=None, transforms=[], torch_dtype=torch.float64, torch_device=torch.device("cpu"), ) self.assertEqual(ma.dtype, torch.float64) self.assertEqual(ma.device, torch.device("cpu")) self.assertFalse(mock_init.call_args[-1]["fit_out_of_design"]) # Test `fit`. model = mock.MagicMock(TorchModel, autospec=True, instance=True) X = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]) Y = np.array([[3.0], [4.0]]) var = np.array([[1.0], [2.0]]) ma._model_fit( model=model, Xs=[X], Ys=[Y], Yvars=[var], bounds=None, feature_names=[], metric_names=[], task_features=[], fidelity_features=[], candidate_metadata=[], ) model_fit_args = model.fit.mock_calls[0][2] self.assertTrue( torch.equal( model_fit_args["Xs"][0], torch.tensor(X, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_fit_args["Ys"][0], torch.tensor(Y, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_fit_args["Yvars"][0], torch.tensor(var, dtype=torch_dtype, device=torch_device), ) ) # Test `update` (need to fill required fields before call to `_model_update`). 
ma.parameters = [] ma.outcomes = [] ma._model_update( Xs=[X], Ys=[Y], Yvars=[var], candidate_metadata=[], bounds=None, feature_names=[], metric_names=[], task_features=[], fidelity_features=[], target_fidelities=[], ) model_update_args = model.update.mock_calls[0][2] self.assertTrue( torch.equal( model_update_args["Xs"][0], torch.tensor(X, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_update_args["Ys"][0], torch.tensor(Y, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_update_args["Yvars"][0], torch.tensor(var, dtype=torch_dtype, device=torch_device), ) ) # Predict model.predict.return_value = (torch.tensor([3.0]), torch.tensor([4.0])) f, var = ma._model_predict(X) self.assertTrue( torch.equal( model.predict.mock_calls[0][2]["X"], torch.tensor(X, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue(np.array_equal(f, np.array([3.0]))) self.assertTrue(np.array_equal(var, np.array([4.0]))) # Gen model.gen.return_value = ( torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.0]), {}, [], ) X, w, _gen_metadata, _candidate_metadata = ma._model_gen( n=3, bounds=[(0, 1)], objective_weights=np.array([1.0, 0.0]), outcome_constraints=None, linear_constraints=None, fixed_features={1: 3.0}, pending_observations=[np.array([]), np.array([1.0, 2.0, 3.0])], model_gen_options={"option": "yes"}, rounding_func=np.round, target_fidelities=None, ) gen_args = model.gen.mock_calls[0][2] self.assertEqual(gen_args["n"], 3) self.assertEqual(gen_args["bounds"], [(0, 1)]) self.assertTrue( torch.equal( gen_args["objective_weights"], torch.tensor([1.0, 0.0], dtype=torch_dtype, device=torch_device), ) ) self.assertIsNone(gen_args["outcome_constraints"]) self.assertIsNone(gen_args["linear_constraints"]) self.assertEqual(gen_args["fixed_features"], {1: 3.0}) self.assertTrue( torch.equal( gen_args["pending_observations"][0], torch.tensor([], dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( gen_args["pending_observations"][1], torch.tensor([1.0, 2.0, 3.0], dtype=torch_dtype, device=torch_device), ) ) self.assertEqual(gen_args["model_gen_options"], {"option": "yes"}) self.assertIsNone(gen_args["target_fidelities"]) # check rounding function t = torch.tensor([0.1, 0.6], dtype=torch_dtype, device=torch_device) self.assertTrue(torch.equal(gen_args["rounding_func"](t), torch.round(t))) self.assertTrue(np.array_equal(X, np.array([1.0, 2.0, 3.0]))) self.assertTrue(np.array_equal(w, np.array([1.0]))) # Cross-validate model.cross_validate.return_value = (torch.tensor([3.0]), torch.tensor([4.0])) f, var = ma._model_cross_validate( Xs_train=[X], Ys_train=[Y], Yvars_train=[var], X_test=X, bounds=[(0, 1)], task_features=[], feature_names=[], metric_names=[], fidelity_features=[], ) model_cv_args = model.cross_validate.mock_calls[0][2] self.assertTrue( torch.equal( model_cv_args["Xs_train"][0], torch.tensor(X, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_cv_args["Ys_train"][0], torch.tensor(Y, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_cv_args["Yvars_train"][0], torch.tensor(var, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue( torch.equal( model_cv_args["X_test"], torch.tensor(X, dtype=torch_dtype, device=torch_device), ) ) self.assertTrue(np.array_equal(f, np.array([3.0]))) self.assertTrue(np.array_equal(var, np.array([4.0]))) # Transform observation features obsf = [ObservationFeatures(parameters={"x": 1.0, "y": 2.0})] ma.parameters = ["x", "y"] X = 
ma._transform_observation_features(obsf) self.assertTrue( torch.equal( X, torch.tensor([[1.0, 2.0]], dtype=torch_dtype, device=torch_device) ) ) # test fit out of design ma = TorchModelBridge( experiment=None, search_space=None, data=None, model=None, transforms=[], torch_dtype=torch.float64, torch_device=torch.device("cpu"), fit_out_of_design=True, ) self.assertTrue(mock_init.call_args[-1]["fit_out_of_design"]) @mock.patch(f"{TorchModel.__module__}.TorchModel", autospec=True) @mock.patch(f"{ArrayModelBridge.__module__}.ArrayModelBridge.__init__") def test_evaluate_acquisition_function(self, _, mock_torch_model): ma = TorchModelBridge( experiment=None, search_space=None, data=None, model=None, transforms=[], torch_dtype=torch.float64, torch_device=torch.device("cpu"), ) # These attributes would've been set by `ArrayModelBridge` __init__, but it's # mocked. ma.model = mock_torch_model() t = mock.MagicMock(Transform, autospec=True) t.transform_observation_features.return_value = [ ObservationFeatures(parameters={"x": 3.0, "y": 4.0}) ] ma.transforms = {"ExampleTransform": t} ma.parameters = ["x", "y"] model_eval_acqf = mock_torch_model.return_value.evaluate_acquisition_function model_eval_acqf.return_value = torch.tensor([5.0], dtype=torch.float64) acqf_vals = ma.evaluate_acquisition_function( observation_features=[ObservationFeatures(parameters={"x": 1.0, "y": 2.0})] ) self.assertEqual(acqf_vals, [5.0]) t.transform_observation_features.assert_called_with( [ObservationFeatures(parameters={"x": 1.0, "y": 2.0})] ) model_eval_acqf.assert_called_once() self.assertTrue( torch.equal( # `call_args` is an (args, kwargs) tuple model_eval_acqf.call_args[1]["X"], torch.tensor([[3.0, 4.0]], dtype=torch.float64), ) )
36.113553
87
0.550664
acedd4e903a02a7e6baffe8979b4c8c82079ac21
10,768
py
Python
backend/tf_cpn/mptest.py
zju-3dv/multi-person3dpose
38b958f423f2de2bf7562f5a386c27440eab8c53
[ "Apache-2.0" ]
391
2019-04-01T04:57:36.000Z
2022-03-25T08:08:57.000Z
backend/tf_cpn/mptest.py
christw16/mvpose
f1bf69d650c7d337e73e0bd63b4472737e49ecba
[ "Apache-2.0" ]
79
2019-04-04T15:39:45.000Z
2022-03-31T11:32:57.000Z
backend/tf_cpn/mptest.py
christw16/mvpose
f1bf69d650c7d337e73e0bd63b4472737e49ecba
[ "Apache-2.0" ]
84
2019-03-29T08:39:59.000Z
2022-03-17T05:47:30.000Z
import os import os.path as osp import numpy as np import argparse from config import cfg import cv2 import sys import time import tensorflow as tf from tfflat.base import Tester from tfflat.utils import mem_info from tfflat.logger import colorlogger from network import Network from lib_kernel.lib_nms.gpu_nms import gpu_nms from lib_kernel.lib_nms.cpu_nms import cpu_soft_nms from dataset import Preprocessing from COCOAllJoints import COCOJoints from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from pycocotools import mask as COCOmask from tqdm import tqdm def test_net(tester, logger, dets, det_range): # here we assume all boxes are pre-processed. nms_method = 'nms' nms_thresh = 1. min_scores = 1e-10 min_box_size = 0. # 8 ** 2 all_res = [] dump_results = [] start_time = time.time() img_start = det_range[0] while img_start < det_range[1]: img_end = img_start + 1 im_info = dets[img_start] while img_end < det_range[1] and dets[img_end]['image_id'] == im_info['image_id']: img_end += 1 test_data = dets[img_start:img_end] img_start = img_end iter_avg_cost_time = (time.time() - start_time) / (img_end - det_range[0]) print('ran %.ds >> << left %.ds' % ( iter_avg_cost_time * (img_end - det_range[0]), iter_avg_cost_time * (det_range[1] - img_end))) print('avg cost time: ', iter_avg_cost_time) all_res.append([]) # get box detections cls_dets = np.zeros((len(test_data), 5), dtype=np.float32) for i in range(len(test_data)): bbox = np.asarray(test_data[i]['bbox']) cls_dets[i, :4] = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]) cls_dets[i, 4] = np.array(test_data[i]['score']) # nms and filter keep = np.where((cls_dets[:, 4] >= min_scores) & ((cls_dets[:, 3] - cls_dets[:, 1]) * (cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0] cls_dets = cls_dets[keep] if len(cls_dets) > 0: if nms_method == 'nms': keep = gpu_nms(cls_dets, nms_thresh) elif nms_method == 'soft': keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2) else: assert False cls_dets = cls_dets[keep] test_data = np.asarray(test_data)[keep] if len(keep) == 0: continue # crop and detect keypoints cls_skeleton = np.zeros((len(test_data), cfg.nr_skeleton, 3)) crops = np.zeros((len(test_data), 4)) cfg.batch_size = 32 batch_size = cfg.batch_size // 2 for test_id in range(0, len(test_data), batch_size): start_id = test_id end_id = min(len(test_data), test_id + batch_size) test_imgs = [] details = [] for i in range(start_id, end_id): test_img, detail = Preprocessing(test_data[i], stage='test') test_imgs.append(test_img) details.append(detail) details = np.asarray(details) feed = test_imgs for i in range(end_id - start_id): ori_img = test_imgs[i][0].transpose(1, 2, 0) flip_img = cv2.flip(ori_img, 1) feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...]) feed = np.vstack(feed) res = tester.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])[0] res = res.transpose(0, 3, 1, 2) for i in range(end_id - start_id): fmp = res[end_id - start_id + i].transpose((1, 2, 0)) fmp = cv2.flip(fmp, 1) fmp = list(fmp.transpose((2, 0, 1))) for (q, w) in cfg.symmetry: fmp[q], fmp[w] = fmp[w], fmp[q] fmp = np.array(fmp) res[i] += fmp res[i] /= 2 for test_image_id in range(start_id, end_id): r0 = res[test_image_id - start_id].copy() r0 /= 255. 
r0 += 0.5 for w in range(cfg.nr_skeleton): res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w]) border = 10 dr = np.zeros((cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border)) dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy() for w in range(cfg.nr_skeleton): dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0) for w in range(cfg.nr_skeleton): lb = dr[w].argmax() y, x = np.unravel_index(lb, dr[w].shape) dr[w, y, x] = 0 lb = dr[w].argmax() py, px = np.unravel_index(lb, dr[w].shape) y -= border x -= border py -= border + y px -= border + x ln = (px ** 2 + py ** 2) ** 0.5 delta = 0.25 if ln > 1e-3: x += delta * px / ln y += delta * py / ln x = max(0, min(x, cfg.output_shape[1] - 1)) y = max(0, min(y, cfg.output_shape[0] - 1)) cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2) cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)] # map back to original images crops[test_image_id, :] = details[test_image_id - start_id, :] for w in range(cfg.nr_skeleton): cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[1] * ( crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0] cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[0] * ( crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1] all_res[-1] = [cls_skeleton.copy(), cls_dets.copy()] cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton) cls_skeleton[:, :, 2] = 1 cls_scores = cls_dets[:, -1].copy() # rescore cls_dets[:, -1] = cls_scores * cls_partsco.mean(axis=1) cls_skeleton = np.concatenate( [cls_skeleton.reshape(-1, cfg.nr_skeleton * 3), (cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]], axis=1) for i in range(len(cls_skeleton)): result = dict(image_id=im_info['image_id'], category_id=1, score=float(round(cls_skeleton[i][-1], 4)), keypoints=cls_skeleton[i][:-1].round(3).tolist()) dump_results.append(result) return all_res, dump_results def test(test_model, logger): eval_gt = COCO(cfg.gt_path) import json with open(cfg.det_path, 'r') as f: dets = json.load(f) test_subset = False if test_subset: eval_gt.imgs = dict(list(eval_gt.imgs.items())[:100]) anns = dict() for i in eval_gt.imgs: for j in eval_gt.getAnnIds(i): anns[j] = eval_gt.anns[j] eval_gt.anns = anns dets = [i for i in dets if i['image_id'] in eval_gt.imgs] dets = [i for i in dets if i['category_id'] == 1] dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True) for i in dets: i['imgpath'] = 'val2014/COCO_val2014_000000%06d.jpg' % i['image_id'] img_num = len(np.unique([i['image_id'] for i in dets])) use_gtboxes = False if use_gtboxes: d = COCOJoints() coco_train_data, coco_test_data = d.load_data() coco_test_data.sort(key=lambda x: x['imgid']) for i in coco_test_data: i['image_id'] = i['imgid'] i['score'] = 1. 
dets = coco_test_data from tfflat.mp_utils import MultiProc img_start = 0 ranges = [0] images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1 for run_img in range(img_num): img_end = img_start + 1 while img_end < len(dets) and dets[img_end]['image_id'] == dets[img_start]['image_id']: img_end += 1 if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num: ranges.append(img_end) img_start = img_end def func(id): cfg.set_args(args.gpu_ids.split(',')[id]) tester = Tester(Network(), cfg) tester.load_weights(test_model) range = [ranges[id], ranges[id + 1]] print('Range: ', range) return test_net(tester, logger, dets, range) MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func, dump_method=1) all_res, dump_results = MultiGPUFunc.work() # evaluation result_path = osp.join(cfg.output_dir, 'results.json') with open(result_path, 'w') as f: json.dump(dump_results, f) eval_dt = eval_gt.loadRes(result_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if __name__ == '__main__': def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', '-d', type=str, dest='gpu_ids') parser.add_argument('--range', '-r', type=str, dest='test_epochs') parser.add_argument('--model', '-m', type=str, dest='test_model') args = parser.parse_args() # test gpus if not args.gpu_ids: args.gpu_ids = str(np.argmin(mem_info())) if '-' in args.gpu_ids: gpus = args.gpu_ids.split('-') gpus[0] = 0 if not gpus[0].isdigit() else int(gpus[0]) gpus[1] = len(mem_info()) if not gpus[1].isdigit() else int(gpus[1]) + 1 args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus)))) if args.test_epochs and not ',' in args.test_epochs: args.test_epochs = '%d,%d' % (int(args.test_epochs), int(args.test_epochs) + 1) assert args.test_epochs or args.test_model, 'Test epoch or model to test is required.' return args global args args = parse_args() if args.test_model: logger = colorlogger(cfg.output_dir, 'test_model_{}'.format(args.test_model.split('/')[-1].split('.')[0])) test(args.test_model, logger) else: for i in range(*eval(args.test_epochs)): log_name = 'test_epoch_{}.logs'.format(i) logger = colorlogger(cfg.output_dir, log_name) test(i, logger)
39.588235
116
0.5586
acedd75111a86c3f8720e82d9994360e0f3a0c53
2,445
py
Python
tests/test_json_schema_compress.py
sthagen/python-json_schema_compress
dc145625ddf6b79474a227e9b0f8115cd1e0f62a
[ "MIT" ]
null
null
null
tests/test_json_schema_compress.py
sthagen/python-json_schema_compress
dc145625ddf6b79474a227e9b0f8115cd1e0f62a
[ "MIT" ]
26
2020-07-30T08:13:57.000Z
2021-01-10T11:23:14.000Z
tests/test_json_schema_compress.py
sthagen/python-json_schema_compress
dc145625ddf6b79474a227e9b0f8115cd1e0f62a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # pylint: disable=missing-docstring,unused-import,reimported import json import pytest # type: ignore import json_schema_compress.json_schema_compress as jsc def test_process_ok_empty_array(): job = ['[]'] assert jsc.process(job) == job[0] def test_process_ok_empty_object(): job = ['{}'] assert jsc.process(job) == job[0] def test_process_ok_direct_json_text(capsys): job = ['{"a": "b", "c": 42, "d": [1, true, false, null, 3.1415, -999999999999999999999]}'] assert jsc.process(job) == job[0] def test_process_ok_direct_json_text_single_description_member_full_stop(capsys): job = [r'{"a": "b", "c": 42, "description": "The thing does stuff.\n\n But not yet enough"}'] compressed = r'{"a": "b", "c": 42, "description": "The thing does stuff."}' assert jsc.process(job) == compressed def test_process_ok_direct_json_text_single_description_member_newline_only(capsys): job = [r'{"a": "b", "c": 42, "description": "The thing does stuff\n\n But not yet enough"}'] compressed = r'{"a": "b", "c": 42, "description": "The thing does stuff"}' assert jsc.process(job) == compressed def test_process_ok_direct_json_text_single_description_member_no_edn_token(capsys): job = [r'{"a": "b", "c": 42, "description": "The thing does stuff endlessly without any full stop or newline"}'] compressed = r'{"a": "b", "c": 42, "description": "The thing does stuff endlessly without any full stop or newline"}' assert jsc.process(job) == compressed def test_process_ok_direct_json_text_two_description_members(capsys): job = [r'{"a": {"description": "An A. Of course"}, "b": {"description": "A B. Maybe"}}'] compressed = r'{"a": {"description": "An A."}, "b": {"description": "A B."}}' assert jsc.process(job) == compressed def test_process_nok_wrong_type_string(): bad = ["bad"] message = r"Expecting value: line 1 column 1 \(char 0\)" with pytest.raises(json.decoder.JSONDecodeError, match=message): jsc.process(bad) def test_extract_paths_ok_direct_simple_json_text(): job = [r'{"a": "b", "c": 42, "description": "The thing does stuff."}'] assert jsc.extract_paths(json.loads(job[0])) == ['a', 'c', 'description'] def test_extract_paths_ok_direct_nested_json_text(): job = [r'{"a": {"a1": "b1", "a2": -1}, "c": 42, "d": [1, 2, 3]}'] assert jsc.extract_paths(json.loads(job[0])) == ['a/a1', 'a/a2', 'c', 'd']
38.809524
121
0.660532
acedd7a0944d8784bf933d66fe77b0849856dd14
1,083
py
Python
gQuant/plugins/cusignal_plugin/setup.py
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
gQuant/plugins/cusignal_plugin/setup.py
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
gQuant/plugins/cusignal_plugin/setup.py
t-triobox/gQuant
6ee3ba104ce4c6f17a5755e7782298902d125563
[ "Apache-2.0" ]
null
null
null
''' Greenflow Cusignal Plugin ''' from setuptools import setup, find_packages setup( name='greenflow_cusignal_plugin', version='1.0', description='greenflow cusignal plugin - RAPIDS Cusignal Nodes for Greenflow', # noqa: E501 install_requires=["greenflow", "cusignal"], packages=find_packages(include=['greenflow_cusignal_plugin', 'greenflow_cusignal_plugin.*']), entry_points={ 'greenflow.plugin': [ 'greenflow_cusignal_plugin = greenflow_cusignal_plugin', 'greenflow_cusignal_plugin.convolution = greenflow_cusignal_plugin.convolution', # noqa: E501 'greenflow_cusignal_plugin.filtering = greenflow_cusignal_plugin.filtering', # noqa: E501 'greenflow_cusignal_plugin.gensig = greenflow_cusignal_plugin.gensig', # noqa: E501 'greenflow_cusignal_plugin.spectral_analysis = greenflow_cusignal_plugin.spectral_analysis', # noqa: E501 'greenflow_cusignal_plugin.windows = greenflow_cusignal_plugin.windows' # noqa: E501 ], } )
45.125
118
0.697138
acedd87ed29f5bf5d30604c4c3c2750d08dba09b
783
bzl
Python
google/cloud/bigquery/samples/bigquery_client_mock_samples.bzl
VPeruS/google-cloud-cpp
727bc4f4676a75a97cec91247bcdd78741cf7204
[ "Apache-2.0" ]
1
2021-05-15T00:31:18.000Z
2021-05-15T00:31:18.000Z
google/cloud/bigquery/samples/bigquery_client_mock_samples.bzl
nschonni/google-cloud-cpp
0f5e785f14fc21315d6c6fc98ace87da9182dbaa
[ "Apache-2.0" ]
null
null
null
google/cloud/bigquery/samples/bigquery_client_mock_samples.bzl
nschonni/google-cloud-cpp
0f5e785f14fc21315d6c6fc98ace87da9182dbaa
[ "Apache-2.0" ]
1
2022-02-03T02:41:00.000Z
2022-02-03T02:41:00.000Z
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed """Automatically generated unit tests list - DO NOT EDIT.""" bigquery_client_mock_samples = [ "mock_bigquery_read.cc", ]
35.590909
79
0.752235
acedd9c83b8b4c051a989c11e0b6be07dc818d4e
4,996
py
Python
edb/edgeql/compiler/clauses.py
haikyuu/edgedb
73125882a4eff337692ad10af4bfdf15eef341ab
[ "Apache-2.0" ]
null
null
null
edb/edgeql/compiler/clauses.py
haikyuu/edgedb
73125882a4eff337692ad10af4bfdf15eef341ab
[ "Apache-2.0" ]
null
null
null
edb/edgeql/compiler/clauses.py
haikyuu/edgedb
73125882a4eff337692ad10af4bfdf15eef341ab
[ "Apache-2.0" ]
null
null
null
# # This source file is part of the EdgeDB open source project. # # Copyright 2008-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """EdgeQL compiler functions to process shared clauses.""" from __future__ import annotations from typing import * from edb.edgeql import ast as qlast from edb.ir import ast as irast from edb import errors from . import context from . import dispatch from . import inference from . import polyres from . import setgen from . import stmtctx def compile_where_clause( ir_stmt: irast.FilteredStmt, where: qlast.Base, *, ctx: context.ContextLevel) -> None: if where is None: return with ctx.newscope(fenced=True) as subctx: subctx.path_scope.unnest_fence = True ir_expr = dispatch.compile(where, ctx=subctx) bool_t = ctx.env.get_track_schema_type('std::bool') ir_set = setgen.scoped_set(ir_expr, typehint=bool_t, ctx=subctx) ir_stmt.where = ir_set stmtctx.get_expr_cardinality_later( target=ir_stmt, field='where_card', irexpr=ir_set, ctx=ctx) def compile_orderby_clause( sortexprs: Iterable[qlast.SortExpr], *, ctx: context.ContextLevel) -> List[irast.SortExpr]: result: List[irast.SortExpr] = [] if not sortexprs: return result with ctx.new() as subctx: for sortexpr in sortexprs: with subctx.newscope(fenced=True) as exprctx: exprctx.path_scope.unnest_fence = True ir_sortexpr = dispatch.compile(sortexpr.path, ctx=exprctx) ir_sortexpr = setgen.scoped_set( ir_sortexpr, force_reassign=True, ctx=exprctx) ir_sortexpr.context = sortexpr.context stmtctx.enforce_singleton(ir_sortexpr, ctx=exprctx) # Check that the sortexpr type is actually orderable # with either '>' or '<' based on the DESC or ASC sort # order. env = exprctx.env sort_type = inference.infer_type(ir_sortexpr, env) # Postgres by default treats ASC as using '<' and DESC # as using '>'. We should do the same. if sortexpr.direction == qlast.SortDesc: op_name = '>' else: op_name = '<' opers = env.schema.get_operators( op_name, module_aliases=exprctx.modaliases) # Verify that a comparison operator is defined for 2 # sort_type expressions. 
matched = polyres.find_callable( opers, args=[(sort_type, ir_sortexpr), (sort_type, ir_sortexpr)], kwargs={}, ctx=exprctx) if len(matched) != 1: sort_type_name = sort_type.material_type(env.schema) \ .get_displayname(env.schema) if len(matched) == 0: raise errors.QueryError( f'type {sort_type_name!r} cannot be used in ' f'ORDER BY clause because ordering is not ' f'defined for it', context=sortexpr.context) elif len(matched) > 1: raise errors.QueryError( f'type {sort_type_name!r} cannot be used in ' f'ORDER BY clause because ordering is ' f'ambiguous for it', context=sortexpr.context) result.append( irast.SortExpr( expr=ir_sortexpr, direction=sortexpr.direction, nones_order=sortexpr.nones_order)) return result def compile_limit_offset_clause( expr: qlast.Base, *, ctx: context.ContextLevel) -> Optional[irast.Set]: if expr is not None: with ctx.newscope(fenced=True) as subctx: ir_expr = dispatch.compile(expr, ctx=subctx) int_t = ctx.env.get_track_schema_type('std::int64') ir_set = setgen.scoped_set( ir_expr, force_reassign=True, typehint=int_t, ctx=subctx) ir_set.context = expr.context stmtctx.enforce_singleton(ir_set, ctx=subctx) else: ir_set = None return ir_set
35.685714
78
0.585268
aceddc659ddc57b073426dd284dbb6c38f6e7907
1,679
py
Python
TestSuit_SenSorData/ExpectResult/WorldPageClick.py
sdwfclcyk1/AutoTestCase
63a6a6a4acf2a9dc572bd917b186638eae65aee7
[ "MIT" ]
1
2018-09-28T11:35:07.000Z
2018-09-28T11:35:07.000Z
TestSuit_SenSorData/ExpectResult/WorldPageClick.py
sdwfclcyk1/AutoTestCase
63a6a6a4acf2a9dc572bd917b186638eae65aee7
[ "MIT" ]
null
null
null
TestSuit_SenSorData/ExpectResult/WorldPageClick.py
sdwfclcyk1/AutoTestCase
63a6a6a4acf2a9dc572bd917b186638eae65aee7
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/25 15:41
# @Author : Kay
# @Site : 
# @File : WorldPageClick.py
# @Software: PyCharm Community Edition


class WorldClickExpection:

    def test_01_AttentionClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "关注Tab"
        }
        return expection

    def test_02_SearchClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "搜索"
        }
        return expection

    def test_03_ReacommendClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "推荐Tab"
        }
        return expection

    def test_04_VComunitClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "V们Tab"
        }
        return expection

    def test_05_SVideoClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "配音Tab"
        }
        return expection

    def test_06_PostClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "帖子详情"
        }
        return expection

    def test_07_UserClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "个人详情"
        }
        return expection

    def test_08_LikeClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "点赞"
        }
        return expection

    def test_09_ReplyClick(self):
        expection = {
            "event": "WorldPageClick ",
            "ButtonName": "回复icon"
        }
        return expection
25.830769
39
0.514592
aceddd50e9410913494c1c80cffe1233ae80d343
174
py
Python
infrastructure-dashboard/dashboard/app.py
uk-gov-mirror/NHSX.covid-chest-imaging-database
77799a97193d09e9267182d18fbb79d604bbb038
[ "MIT" ]
56
2020-04-08T12:40:28.000Z
2021-10-02T22:57:16.000Z
infrastructure-dashboard/dashboard/app.py
uk-gov-mirror/NHSX.covid-chest-imaging-database
77799a97193d09e9267182d18fbb79d604bbb038
[ "MIT" ]
111
2020-04-02T13:23:06.000Z
2022-03-30T13:23:28.000Z
infrastructure-dashboard/dashboard/app.py
uk-gov-mirror/NHSX.covid-chest-imaging-database
77799a97193d09e9267182d18fbb79d604bbb038
[ "MIT" ]
10
2020-05-05T14:07:11.000Z
2022-01-11T15:47:27.000Z
#!/usr/bin/env python3 from aws_cdk import core from dashboard.dashboard_stack import DashboardStack app = core.App() DashboardStack(app, "nccid-dashboard") app.synth()
14.5
52
0.770115
aceddd57fd5519bddbd26624d47adef18b2e96a9
3,097
py
Python
castellan/tests/unit/test_options.py
openstack/castellan
ebafb3c656aa08329d05103ebbfd67c47c4c12a4
[ "Apache-2.0" ]
41
2015-01-02T09:38:05.000Z
2019-01-28T21:52:51.000Z
castellan/tests/unit/test_options.py
openstack/castellan
ebafb3c656aa08329d05103ebbfd67c47c4c12a4
[ "Apache-2.0" ]
null
null
null
castellan/tests/unit/test_options.py
openstack/castellan
ebafb3c656aa08329d05103ebbfd67c47c4c12a4
[ "Apache-2.0" ]
9
2015-05-18T03:40:00.000Z
2020-02-04T15:22:50.000Z
# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from castellan import key_manager from castellan.key_manager import barbican_key_manager as bkm from castellan import options from castellan.tests import base from castellan.tests.unit.key_manager import mock_key_manager class TestOptions(base.TestCase): def test_set_defaults(self): conf = cfg.ConfigOpts() self.assertTrue(isinstance(key_manager.API(conf), bkm.BarbicanKeyManager)) cls = mock_key_manager.MockKeyManager backend = '%s.%s' % (cls.__module__, cls.__name__) options.set_defaults(conf, backend=backend) self.assertEqual(backend, conf.key_manager.backend) self.assertIsInstance(key_manager.API(conf), mock_key_manager.MockKeyManager) barbican_endpoint = 'http://test-server.org:9311/' options.set_defaults(conf, barbican_endpoint=barbican_endpoint) self.assertEqual(barbican_endpoint, conf.barbican.barbican_endpoint) barbican_api_version = 'vSomething' options.set_defaults(conf, barbican_api_version=barbican_api_version) self.assertEqual(barbican_api_version, conf.barbican.barbican_api_version) auth_endpoint = 'http://test-server.org/identity' options.set_defaults(conf, auth_endpoint=auth_endpoint) self.assertEqual(auth_endpoint, conf.barbican.auth_endpoint) retry_delay = 3 options.set_defaults(conf, retry_delay=retry_delay) self.assertEqual(retry_delay, conf.barbican.retry_delay) number_of_retries = 10 options.set_defaults(conf, number_of_retries=number_of_retries) self.assertEqual(number_of_retries, conf.barbican.number_of_retries) verify_ssl = False options.set_defaults(conf, verify_ssl=False) self.assertEqual(verify_ssl, conf.barbican.verify_ssl) verify_ssl_path = '/mnt' options.set_defaults(conf, verify_ssl_path='/mnt') self.assertEqual(verify_ssl_path, conf.barbican.verify_ssl_path) barbican_endpoint_type = 'internal' options.set_defaults(conf, barbican_endpoint_type='internal') result_type = conf.barbican.barbican_endpoint_type self.assertEqual(barbican_endpoint_type, result_type)
39.202532
78
0.68195
aceddda9fafc5b9bb01736fc3befe5225808bb48
289
py
Python
tests/test_network.py
SickChill/python-tvmaze
8f47ae8becaa9238dd2c86abff34cc335b8f4709
[ "MIT" ]
null
null
null
tests/test_network.py
SickChill/python-tvmaze
8f47ae8becaa9238dd2c86abff34cc335b8f4709
[ "MIT" ]
null
null
null
tests/test_network.py
SickChill/python-tvmaze
8f47ae8becaa9238dd2c86abff34cc335b8f4709
[ "MIT" ]
null
null
null
import unittest from tvmaze.api import Api from tvmaze.models import Network class NetworkTests(unittest.TestCase): def setUp(self): self.api = Api() def test_network_get(self): network = self.api.network.get(1) self.assertIsInstance(network, Network)
19.266667
47
0.698962
acedddab8ebc6a090c84687905bded0a9eee10cf
27,297
py
Python
version/gallerydialog.py
mycropen/happypanda
680d8d6ad440bb87c2d4eeac9ae8aa5eaf1e4925
[ "Apache-2.0" ]
1
2022-03-29T14:44:59.000Z
2022-03-29T14:44:59.000Z
version/gallerydialog.py
mycropen/happypanda
680d8d6ad440bb87c2d4eeac9ae8aa5eaf1e4925
[ "Apache-2.0" ]
null
null
null
version/gallerydialog.py
mycropen/happypanda
680d8d6ad440bb87c2d4eeac9ae8aa5eaf1e4925
[ "Apache-2.0" ]
null
null
null
import queue, os, threading, random, logging, time, scandir from datetime import datetime from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QDesktopWidget, QGroupBox, QHBoxLayout, QFormLayout, QLabel, QLineEdit, QPushButton, QProgressBar, QTextEdit, QComboBox, QDateEdit, QFileDialog, QMessageBox, QScrollArea, QCheckBox, QSizePolicy, QSpinBox) from PyQt5.QtCore import (pyqtSignal, Qt, QPoint, QDate, QThread, QTimer) import app_constants import utils import gallerydb import fetch import misc import database import settings log = logging.getLogger(__name__) log_i = log.info log_d = log.debug log_w = log.warning log_e = log.error log_c = log.critical class GalleryDialog(QWidget): """ A window for adding/modifying gallery. Pass a list of QModelIndexes to edit their data or pass a path to preset path """ def __init__(self, parent, arg=None): super().__init__(parent, Qt.Dialog) self.setAttribute(Qt.WA_DeleteOnClose) self.setAutoFillBackground(True) self.parent_widget = parent m_l = QVBoxLayout() self.main_layout = QVBoxLayout() dummy = QWidget(self) scroll_area = QScrollArea(self) scroll_area.setWidgetResizable(True) scroll_area.setFrameStyle(scroll_area.StyledPanel) dummy.setLayout(self.main_layout) scroll_area.setWidget(dummy) m_l.addWidget(scroll_area, 3) final_buttons = QHBoxLayout() final_buttons.setAlignment(Qt.AlignRight) m_l.addLayout(final_buttons) self.done = QPushButton("Done") self.done.setDefault(True) self.cancel = QPushButton("Cancel") final_buttons.addWidget(self.cancel) final_buttons.addWidget(self.done) self._multiple_galleries = False self._edit_galleries = [] def new_gallery(): self.setWindowTitle('Add a new gallery') self.newUI() self.commonUI() self.done.clicked.connect(self.accept) self.cancel.clicked.connect(self.reject) if arg: if isinstance(arg, (list, gallerydb.Gallery)): if isinstance(arg, gallerydb.Gallery): self.setWindowTitle('Edit gallery') self._edit_galleries.append(arg) else: self.setWindowTitle('Edit {} galleries'.format(len(arg))) self._multiple_galleries = True self._edit_galleries.extend(arg) self.commonUI() self.setGallery(arg) self.done.clicked.connect(self.accept_edit) self.cancel.clicked.connect(self.reject_edit) elif isinstance(arg, str): new_gallery() self.choose_dir(arg) else: new_gallery() log_d('GalleryDialog: Create UI: successful') self.setLayout(m_l) if self._multiple_galleries: self.resize(500, 480) else: self.resize(500, 600) frect = self.frameGeometry() frect.moveCenter(QDesktopWidget().availableGeometry().center()) self.move(frect.topLeft()) self._fetch_inst = fetch.Fetch() self._fetch_thread = QThread(self) self._fetch_thread.setObjectName("GalleryDialog metadata thread") self._fetch_inst.moveToThread(self._fetch_thread) self._fetch_thread.started.connect(self._fetch_inst.auto_web_metadata) def commonUI(self): if not self._multiple_galleries: f_web = QGroupBox("Metadata from the Web") f_web.setCheckable(False) self.main_layout.addWidget(f_web) web_main_layout = QVBoxLayout() web_info = misc.ClickedLabel("Which gallery URLs are supported? 
(hover)", parent=self) web_info.setToolTip(app_constants.SUPPORTED_METADATA_URLS) web_info.setToolTipDuration(999999999) web_main_layout.addWidget(web_info) web_layout = QHBoxLayout() web_main_layout.addLayout(web_layout) f_web.setLayout(web_main_layout) def basic_web(name): return QLabel(name), QLineEdit(), QPushButton("Get metadata"), QProgressBar() url_lbl, self.url_edit, self.url_btn, self.url_prog = basic_web("URL:") self.url_btn.clicked.connect(lambda: self.web_metadata(self.url_edit.text(), self.url_btn, self.url_prog)) self.url_prog.setTextVisible(False) self.url_prog.setMinimum(0) self.url_prog.setMaximum(0) web_layout.addWidget(url_lbl, 0, Qt.AlignLeft) web_layout.addWidget(self.url_edit, 0) web_layout.addWidget(self.url_btn, 0, Qt.AlignRight) web_layout.addWidget(self.url_prog, 0, Qt.AlignRight) self.url_edit.setPlaceholderText("Insert supported gallery URLs or just press the button!") self.url_prog.hide() f_gallery = QGroupBox("Gallery Info") f_gallery.setCheckable(False) self.main_layout.addWidget(f_gallery) gallery_layout = QFormLayout() f_gallery.setLayout(gallery_layout) def checkbox_layout(widget): if self._multiple_galleries: l = QHBoxLayout() l.addWidget(widget.g_check) widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) l.addWidget(widget) return l else: widget.g_check.setChecked(True) widget.g_check.hide() return widget def add_check(widget): widget.g_check = QCheckBox(self) return widget self.title_edit = add_check(QLineEdit()) self.author_edit = add_check(QLineEdit()) author_completer = misc.GCompleter(self, False, True, False) author_completer.setCaseSensitivity(Qt.CaseInsensitive) self.author_edit.setCompleter(author_completer) self.descr_edit = add_check(QTextEdit()) self.descr_edit.setAcceptRichText(True) self.lang_box = add_check(QComboBox()) self.lang_box.addItems(app_constants.G_LANGUAGES) self.lang_box.addItems(app_constants.G_CUSTOM_LANGUAGES) self.rating_box = add_check(QSpinBox()) self.rating_box.setMaximum(5) self.rating_box.setMinimum(0) self._find_combobox_match(self.lang_box, app_constants.G_DEF_LANGUAGE, 0) tags_l = QVBoxLayout() tag_info = misc.ClickedLabel("How do i write namespace & tags? 
(hover)", parent=self) tag_info.setToolTip("Ways to write tags:\n\nNormal tags:\ntag1, tag2, tag3\n\n"+ "Namespaced tags:\nns1:tag1, ns1:tag2\n\nNamespaced tags with one or more"+ " tags under same namespace:\nns1:[tag1, tag2, tag3], ns2:[tag1, tag2]\n\n"+ "Those three ways of writing namespace & tags can be combined freely.\n"+ "Tags are seperated by a comma, NOT whitespace.\nNamespaces will be capitalized while tags"+ " will be lowercased.") tag_info.setToolTipDuration(99999999) tags_l.addWidget(tag_info) self.tags_edit = add_check(misc.CompleterTextEdit()) self.tags_edit.setCompleter(misc.GCompleter(self, False, False)) self.tags_append = QCheckBox("Append tags", self) self.tags_append.setChecked(False) if not self._multiple_galleries: self.tags_append.hide() if self._multiple_galleries: self.tags_append.setChecked(app_constants.APPEND_TAGS_GALLERIES) tags_ml = QVBoxLayout() tags_ml.addWidget(self.tags_append) tags_ml.addLayout(checkbox_layout(self.tags_edit), 5) tags_l.addLayout(tags_ml, 3) else: tags_l.addWidget(checkbox_layout(self.tags_edit), 5) self.tags_edit.setPlaceholderText("Press Tab to autocomplete (Ctrl + E to show popup)") self.type_box = add_check(QComboBox()) self.type_box.addItems(app_constants.G_TYPES) self._find_combobox_match(self.type_box, app_constants.G_DEF_TYPE, 0) #self.type_box.currentIndexChanged[int].connect(self.doujin_show) #self.doujin_parent = QLineEdit() #self.doujin_parent.setVisible(False) self.status_box = add_check(QComboBox()) self.status_box.addItems(app_constants.G_STATUS) self._find_combobox_match(self.status_box, app_constants.G_DEF_STATUS, 0) self.pub_edit = add_check(QDateEdit()) self.pub_edit.setCalendarPopup(True) self.pub_edit.setDate(QDate.currentDate()) self.path_lbl = misc.ClickedLabel("") self.path_lbl.setWordWrap(True) self.path_lbl.clicked.connect(lambda a: utils.open_path(a, a) if a else None) link_layout = QHBoxLayout() self.link_lbl = add_check(QLabel("")) self.link_lbl.setWordWrap(True) self.link_edit = QLineEdit() link_layout.addWidget(self.link_edit) if self._multiple_galleries: link_layout.addLayout(checkbox_layout(self.link_lbl)) else: link_layout.addWidget(checkbox_layout(self.link_lbl)) self.link_edit.hide() self.link_btn = QPushButton("Modify") self.link_btn.setFixedWidth(50) self.link_btn2 = QPushButton("Set") self.link_btn2.setFixedWidth(40) self.link_btn.clicked.connect(self.link_modify) self.link_btn2.clicked.connect(self.link_set) link_layout.addWidget(self.link_btn) link_layout.addWidget(self.link_btn2) self.link_btn2.hide() rating_ = checkbox_layout(self.rating_box) lang_ = checkbox_layout(self.lang_box) if self._multiple_galleries: rating_.insertWidget(0, QLabel("Rating:")) lang_.addLayout(rating_) lang_l = lang_ else: lang_l = QHBoxLayout() lang_l.addWidget(lang_) lang_l.addWidget(QLabel("Rating:"), 0, Qt.AlignRight) lang_l.addWidget(rating_) gallery_layout.addRow("Title:", checkbox_layout(self.title_edit)) gallery_layout.addRow("Author:", checkbox_layout(self.author_edit)) gallery_layout.addRow("Description:", checkbox_layout(self.descr_edit)) gallery_layout.addRow("Language:", lang_l) gallery_layout.addRow("Tags:", tags_l) gallery_layout.addRow("Type:", checkbox_layout(self.type_box)) gallery_layout.addRow("Status:", checkbox_layout(self.status_box)) gallery_layout.addRow("Publication Date:", checkbox_layout(self.pub_edit)) gallery_layout.addRow("Path:", self.path_lbl) gallery_layout.addRow("URL:", link_layout) self.title_edit.setFocus() def resizeEvent(self, event): 
self.tags_edit.setFixedHeight(event.size().height()//8) self.descr_edit.setFixedHeight(event.size().height()//12.5) return super().resizeEvent(event) def _find_combobox_match(self, combobox, key, default): f_index = combobox.findText(key, Qt.MatchFixedString) if f_index != -1: combobox.setCurrentIndex(f_index) return True else: combobox.setCurrentIndex(default) return False def setGallery(self, gallery): "To be used for when editing a gallery" if isinstance(gallery, gallerydb.Gallery): self.gallery = gallery if not self._multiple_galleries: self.url_edit.setText(gallery.link) self.title_edit.setText(gallery.title) self.author_edit.setText(gallery.artist) self.descr_edit.setText(gallery.info) self.rating_box.setValue(gallery.rating) self.tags_edit.setText(utils.tag_to_string(gallery.tags)) if not self._find_combobox_match(self.lang_box, gallery.language, 1): self._find_combobox_match(self.lang_box, app_constants.G_DEF_LANGUAGE, 1) if not self._find_combobox_match(self.type_box, gallery.type, 0): self._find_combobox_match(self.type_box, app_constants.G_DEF_TYPE, 0) if not self._find_combobox_match(self.status_box, gallery.status, 0): self._find_combobox_match(self.status_box, app_constants.G_DEF_STATUS, 0) gallery_pub_date = "{}".format(gallery.pub_date).split(' ') try: self.gallery_time = datetime.strptime(gallery_pub_date[1], '%H:%M:%S').time() except IndexError: pass qdate_pub_date = QDate.fromString(gallery_pub_date[0], "yyyy-MM-dd") self.pub_edit.setDate(qdate_pub_date) self.link_lbl.setText(gallery.link) self.path_lbl.setText(gallery.path) elif isinstance(gallery, list): g = gallery[0] if all(map(lambda x: x.title == g.title, gallery)): self.title_edit.setText(g.title) self.title_edit.g_check.setChecked(True) if all(map(lambda x: x.artist == g.artist, gallery)): self.author_edit.setText(g.artist) self.author_edit.g_check.setChecked(True) if all(map(lambda x: x.info == g.info, gallery)): self.descr_edit.setText(g.info) self.descr_edit.g_check.setChecked(True) if all(map(lambda x: x.tags == g.tags, gallery)): self.tags_edit.setText(utils.tag_to_string(g.tags)) self.tags_edit.g_check.setChecked(True) if all(map(lambda x: x.language == g.language, gallery)): if not self._find_combobox_match(self.lang_box, g.language, 1): self._find_combobox_match(self.lang_box, app_constants.G_DEF_LANGUAGE, 1) self.lang_box.g_check.setChecked(True) if all(map(lambda x: x.rating == g.rating, gallery)): self.rating_box.setValue(g.rating) self.rating_box.g_check.setChecked(True) if all(map(lambda x: x.type == g.type, gallery)): if not self._find_combobox_match(self.type_box, g.type, 0): self._find_combobox_match(self.type_box, app_constants.G_DEF_TYPE, 0) self.type_box.g_check.setChecked(True) if all(map(lambda x: x.status == g.status, gallery)): if not self._find_combobox_match(self.status_box, g.status, 0): self._find_combobox_match(self.status_box, app_constants.G_DEF_STATUS, 0) self.status_box.g_check.setChecked(True) if all(map(lambda x: x.pub_date == g.pub_date, gallery)): gallery_pub_date = "{}".format(g.pub_date).split(' ') try: self.gallery_time = datetime.strptime(gallery_pub_date[1], '%H:%M:%S').time() except IndexError: pass qdate_pub_date = QDate.fromString(gallery_pub_date[0], "yyyy-MM-dd") self.pub_edit.setDate(qdate_pub_date) self.pub_edit.g_check.setChecked(True) if all(map(lambda x: x.link == g.link, gallery)): self.link_lbl.setText(g.link) self.link_lbl.g_check.setChecked(True) def newUI(self): f_local = QGroupBox("Directory/Archive") f_local.setCheckable(False) 
self.main_layout.addWidget(f_local) local_layout = QHBoxLayout() f_local.setLayout(local_layout) choose_folder = QPushButton("From Directory") choose_folder.clicked.connect(lambda: self.choose_dir('f')) local_layout.addWidget(choose_folder) choose_archive = QPushButton("From Archive") choose_archive.clicked.connect(lambda: self.choose_dir('a')) local_layout.addWidget(choose_archive) self.file_exists_lbl = QLabel() local_layout.addWidget(self.file_exists_lbl) self.file_exists_lbl.hide() def choose_dir(self, mode): """ Pass which mode to open the folder explorer in: 'f': directory 'a': files Or pass a predefined path """ self.done.show() self.file_exists_lbl.hide() if mode == 'a': name = QFileDialog.getOpenFileName(self, 'Choose archive', filter=utils.FILE_FILTER) name = name[0] elif mode == 'f': name = QFileDialog.getExistingDirectory(self, 'Choose folder') elif mode: if os.path.exists(mode): name = mode else: return None if not name: return head, tail = os.path.split(name) name = os.path.join(head, tail) parsed = utils.title_parser(tail) self.title_edit.setText(parsed['title']) self.author_edit.setText(parsed['artist']) self.path_lbl.setText(name) if not parsed['language']: parsed['language'] = app_constants.G_DEF_LANGUAGE l_i = self.lang_box.findText(parsed['language']) if l_i != -1: self.lang_box.setCurrentIndex(l_i) if gallerydb.GalleryDB.check_exists(name): self.file_exists_lbl.setText('<font color="red">Gallery already exists.</font>') self.file_exists_lbl.show() # check galleries gs = 1 if name.endswith(utils.ARCHIVE_FILES): gs = len(utils.check_archive(name)) elif os.path.isdir(name): g_dirs, g_archs = utils.recursive_gallery_check(name) gs = len(g_dirs) + len(g_archs) if gs == 0: self.file_exists_lbl.setText('<font color="red">Invalid gallery source.</font>') self.file_exists_lbl.show() self.done.hide() if app_constants.SUBFOLDER_AS_GALLERY: if gs > 1: self.file_exists_lbl.setText('<font color="red">More than one galleries detected in source! Use other methods to add.</font>') self.file_exists_lbl.show() self.done.hide() def check(self): if not self._multiple_galleries: if len(self.title_edit.text()) == 0: self.title_edit.setFocus() self.title_edit.setStyleSheet("border-style:outset;border-width:2px;border-color:red;") return False elif len(self.author_edit.text()) == 0: self.author_edit.setText("Unknown") if len(self.path_lbl.text()) == 0 or self.path_lbl.text() == 'No path specified': self.path_lbl.setStyleSheet("color:red") self.path_lbl.setText('No path specified') return False return True def reject(self): if self.check(): msgbox = QMessageBox() msgbox.setText("<font color='red'><b>Noo oniichan! 
You were about to add a new gallery.</b></font>") msgbox.setInformativeText("Do you really want to discard?") msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No) msgbox.setDefaultButton(QMessageBox.No) if msgbox.exec() == QMessageBox.Yes: self.close() else: self.close() def web_metadata(self, url, btn_widget, pgr_widget): if not self.path_lbl.text(): return self.link_lbl.setText(url) btn_widget.hide() pgr_widget.show() def status(stat): def do_hide(): try: pgr_widget.hide() btn_widget.show() except RuntimeError: pass if stat: do_hide() else: danger = """QProgressBar::chunk { background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0,stop: 0 #FF0350,stop: 0.4999 #FF0020,stop: 0.5 #FF0019,stop: 1 #FF0000 ); border-bottom-right-radius: 5px; border-bottom-left-radius: 5px; border: .px solid black;}""" pgr_widget.setStyleSheet(danger) QTimer.singleShot(3000, do_hide) def gallery_picker(gallery, title_url_list, q): self.parent_widget._web_metadata_picker(gallery, title_url_list, q, self) try: dummy_gallery = self.make_gallery(self.gallery, False) except AttributeError: dummy_gallery = self.make_gallery(gallerydb.Gallery(), False, True) if not dummy_gallery: status(False) return None dummy_gallery._g_dialog_url = url self._fetch_inst.galleries = [dummy_gallery] self._disconnect() self._fetch_inst.GALLERY_PICKER.connect(gallery_picker) self._fetch_inst.GALLERY_EMITTER.connect(self.set_web_metadata) self._fetch_inst.FINISHED.connect(status) self._fetch_thread.start() def set_web_metadata(self, metadata): assert isinstance(metadata, gallerydb.Gallery) self.link_lbl.setText(metadata.link) self.title_edit.setText(metadata.title) self.author_edit.setText(metadata.artist) tags = "" lang = ['English', 'Japanese'] self._find_combobox_match(self.lang_box, metadata.language, 2) self.tags_edit.setText(utils.tag_to_string(metadata.tags)) pub_string = "{}".format(metadata.pub_date) pub_date = QDate.fromString(pub_string.split()[0], "yyyy-MM-dd") self.pub_edit.setDate(pub_date) self._find_combobox_match(self.type_box, metadata.type, 0) def make_gallery(self, new_gallery, add_to_model=True, new=False): def is_checked(widget): return widget.g_check.isChecked() if self.check(): if is_checked(self.title_edit): new_gallery.title = self.title_edit.text() log_d('Adding gallery title') if is_checked(self.author_edit): new_gallery.artist = self.author_edit.text() log_d('Adding gallery artist') if not self._multiple_galleries: new_gallery.path = self.path_lbl.text() log_d('Adding gallery path') if is_checked(self.descr_edit): new_gallery.info = self.descr_edit.toPlainText() log_d('Adding gallery descr') if is_checked(self.type_box): new_gallery.type = self.type_box.currentText() log_d('Adding gallery type') if is_checked(self.lang_box): new_gallery.language = self.lang_box.currentText() log_d('Adding gallery lang') if is_checked(self.rating_box): new_gallery.rating = self.rating_box.value() log_d('Adding gallery rating') if is_checked(self.status_box): new_gallery.status = self.status_box.currentText() log_d('Adding gallery status') if is_checked(self.tags_edit): if self.tags_append.isChecked(): new_gallery.tags = utils.tag_to_dict(utils.tag_to_string(new_gallery.tags)+","+ self.tags_edit.toPlainText()) else: new_gallery.tags = utils.tag_to_dict(self.tags_edit.toPlainText()) log_d('Adding gallery: tagging to dict') if is_checked(self.pub_edit): qpub_d = self.pub_edit.date().toString("ddMMyyyy") dpub_d = datetime.strptime(qpub_d, "%d%m%Y").date() try: d_t = self.gallery_time except AttributeError: d_t = 
datetime.now().time().replace(microsecond=0) dpub_d = datetime.combine(dpub_d, d_t) new_gallery.pub_date = dpub_d log_d('Adding gallery pub date') if is_checked(self.link_lbl): new_gallery.link = self.link_lbl.text() log_d('Adding gallery link') if new: if not new_gallery.chapters: log_d('Starting chapters') thread = threading.Thread(target=utils.make_chapters, args=(new_gallery,)) thread.start() thread.join() log_d('Finished chapters') if new and app_constants.MOVE_IMPORTED_GALLERIES: app_constants.OVERRIDE_MONITOR = True new_gallery.move_gallery() if add_to_model: self.parent_widget.default_manga_view.add_gallery(new_gallery, True) log_i('Sent gallery to model') else: if add_to_model: self.parent_widget.default_manga_view.replace_gallery([new_gallery], False) return new_gallery def link_set(self): t = self.link_edit.text() self.link_edit.hide() self.link_lbl.show() self.link_lbl.setText(t) self.link_btn2.hide() self.link_btn.show() def link_modify(self): t = self.link_lbl.text() self.link_lbl.hide() self.link_edit.show() self.link_edit.setText(t) self.link_btn.hide() self.link_btn2.show() def _disconnect(self): try: self._fetch_inst.GALLERY_PICKER.disconnect() self._fetch_inst.GALLERY_EMITTER.disconnect() self._fetch_inst.FINISHED.disconnect() except TypeError: pass def delayed_close(self): if self._fetch_thread.isRunning(): self._fetch_thread.finished.connect(self.close) self.hide() else: self.close() def accept(self): self.make_gallery(gallerydb.Gallery(), new=True) self.delayed_close() def accept_edit(self): gallerydb.execute(database.db.DBBase.begin, True) app_constants.APPEND_TAGS_GALLERIES = self.tags_append.isChecked() settings.set(app_constants.APPEND_TAGS_GALLERIES, 'Application', 'append tags to gallery') for g in self._edit_galleries: self.make_gallery(g) self.delayed_close() gallerydb.execute(database.db.DBBase.end, True) def reject_edit(self): self.delayed_close() def keyPressEvent(self, event): # Return: # When url_edit is in focus: click url_btn # else when anything but descr_edit or tags_edit is in focus: accept_edit # Escape: # reject_edit if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter: if self.url_edit.hasFocus(): self.web_metadata(self.url_edit.text(), self.url_btn, self.url_prog) elif not self.descr_edit.hasFocus() and not self.tags_edit.hasFocus(): self.done.click() elif event.key() == Qt.Key_Escape: self.cancel.click() return super().keyPressEvent(event)
43.397456
149
0.609847
acedddd4b6bfc9b63a4e154bde3a834ca8982216
202
py
Python
pie_sunburst_treemaps/tips_sunburst.py
thefullstackninja/plotly_express_tutorial
23b706269d997058197db9600c4ec953e89ab5e2
[ "MIT" ]
null
null
null
pie_sunburst_treemaps/tips_sunburst.py
thefullstackninja/plotly_express_tutorial
23b706269d997058197db9600c4ec953e89ab5e2
[ "MIT" ]
null
null
null
pie_sunburst_treemaps/tips_sunburst.py
thefullstackninja/plotly_express_tutorial
23b706269d997058197db9600c4ec953e89ab5e2
[ "MIT" ]
1
2022-01-08T08:33:27.000Z
2022-01-08T08:33:27.000Z
import pandas as pd import plotly.express as px df = pd.read_csv("../data/tips.csv") plot = px.sunburst( data_frame=df, path=['day','time','sex','smoker'], values='tip', ) plot.show()
12.625
39
0.618812
aceddddd7c2e445721bce5db924cafd98c24d3b3
308
py
Python
month03.2/django/day05/mysitel3/mtm/admin.py
Amiao-miao/all-codes
ec50036d42d40086cac5fddf6baf4de18ac91e55
[ "Apache-2.0" ]
1
2021-02-02T02:17:37.000Z
2021-02-02T02:17:37.000Z
month03.2/django/day05/mysitel3/mtm/admin.py
Amiao-miao/all-codes
ec50036d42d40086cac5fddf6baf4de18ac91e55
[ "Apache-2.0" ]
null
null
null
month03.2/django/day05/mysitel3/mtm/admin.py
Amiao-miao/all-codes
ec50036d42d40086cac5fddf6baf4de18ac91e55
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin from .models import * # Register your models here. class AuthorManager(admin.ModelAdmin): list_display = ['id','name'] class BookManager(admin.ModelAdmin): list_display = ['id','title'] admin.site.register(Author,AuthorManager) admin.site.register(Book,BookManager)
28
41
0.762987
acedde0004759873557172fbef7d9ce3c0cabba7
6,542
py
Python
satellighte/archs/mobilenetv2/module.py
canturan10/satellighte
09c906fdf8b538296c488d0c2d86c344007f8666
[ "MIT" ]
null
null
null
satellighte/archs/mobilenetv2/module.py
canturan10/satellighte
09c906fdf8b538296c488d0c2d86c344007f8666
[ "MIT" ]
null
null
null
satellighte/archs/mobilenetv2/module.py
canturan10/satellighte
09c906fdf8b538296c488d0c2d86c344007f8666
[ "MIT" ]
null
null
null
import os from typing import Dict, List import torch import torch.nn as nn import torchvision from .blocks.mobilenetv2 import MobileNet_V2 class MobileNetV2(nn.Module): """ Implementation of MobileNetV2: Inverted Residuals and Linear Bottlenecks The network is the one described in arxiv.org/abs/1801.04381v4 . """ # pylint: disable=no-member __CONFIGS__ = { "default": { "input": { "input_size": 224, "normalized_input": True, "mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225], }, "model": { "width_mult": 1.0, "inverted_residual_setting": None, "round_nearest": 8, "block": None, "norm_layer": None, "dropout": 0.5, "pretrained": False, }, }, } def __init__( self, config: Dict, labels: List[str] = None, ): super().__init__() self.config = config self.labels = labels self.num_classes = len(self.labels) self.backbone = MobileNet_V2( num_classes=self.num_classes, width_mult=self.config["model"]["width_mult"], inverted_residual_setting=self.config["model"]["inverted_residual_setting"], round_nearest=self.config["model"]["round_nearest"], block=self.config["model"]["block"], norm_layer=self.config["model"]["norm_layer"], dropout=self.config["model"]["dropout"], ) if self.config["model"]["pretrained"]: self.backbone.load_state_dict( torchvision.models.mobilenet_v2(pretrained=True).state_dict(), strict=False, ) def forward(self, inputs): """ Forward pass of the model. """ return self.backbone(inputs) def logits_to_preds(self, logits: List[torch.Tensor]): """ Convert logits to predictions. """ return torch.softmax(logits, axis=-1) @classmethod def build( cls, config: str = "", labels: List[str] = None, **kwargs, ) -> nn.Module: """ Build the model with random weights. Args: config (str, optional): Configuration name. Defaults to "". labels (List[str], optional): List of labels. Defaults to None. Returns: nn.Module: Model with random weights. """ # return model with random weight initialization labels = ["cls1", "cls2"] if labels is None else labels return cls( config=MobileNetV2.__CONFIGS__[config], labels=labels, **kwargs, ) @classmethod def from_pretrained( cls, model_path: str, config: str, *args, **kwargs, ) -> nn.Module: """ Load a model from a pre-trained model. Args: model_path (str): Path to the pre-trained model config (str): Configuration of the model Returns: nn.Module: Model with pretrained weights """ *_, full_model_name, _ = model_path.split(os.path.sep) s_dict = torch.load( os.path.join(model_path, f"{full_model_name}.pth"), map_location="cpu" ) model = cls( config=MobileNetV2.__CONFIGS__[config], labels=s_dict["labels"], *args, **kwargs, ) model.load_state_dict(s_dict["state_dict"], strict=True) return model def compute_loss( self, logits: List[torch.Tensor], targets: List, hparams: Dict, ): """ Compute the loss for the model. Args: logits (List[torch.Tensor]): _description_ targets (List): _description_ hparams (Dict, optional): _description_. Defaults to {}. Raises: ValueError: Unknown criterion Returns: Loss """ if hparams.get("criterion", "cross_entropy") == "cross_entropy": loss_fcn = nn.CrossEntropyLoss() else: raise ValueError("Unknown criterion") return {"loss": loss_fcn(logits, targets)} def configure_optimizers(self, hparams: Dict): """ Configure optimizers for the model. 
Args: hparams (Dict): Hyperparameters Raises: ValueError: Unknown optimizer ValueError: Unknown scheduler Returns: optimizers and scheduler """ hparams_optimizer = hparams.get("optimizer", "sgd") if hparams_optimizer == "sgd": optimizer = torch.optim.SGD( self.parameters(), lr=hparams.get("learning_rate", 1e-1), momentum=hparams.get("momentum", 0.9), weight_decay=hparams.get("weight_decay", 1e-5), ) elif hparams_optimizer == "adam": optimizer = torch.optim.Adam( self.parameters(), lr=hparams.get("learning_rate", 1e-1), betas=hparams.get("betas", (0.9, 0.999)), eps=hparams.get("eps", 1e-08), weight_decay=hparams.get("weight_decay", 1e-5), ) elif hparams_optimizer == "adamw": optimizer = torch.optim.AdamW( self.parameters(), lr=hparams.get("learning_rate", 1e-1), betas=hparams.get("betas", (0.9, 0.999)), eps=hparams.get("eps", 1e-08), weight_decay=hparams.get("weight_decay", 1e-5), ) else: raise ValueError("Unknown optimizer") hparams_scheduler = hparams.get("scheduler", "steplr") if hparams_scheduler == "steplr": scheduler = torch.optim.lr_scheduler.StepLR( optimizer, step_size=hparams.get("step_size", 4), gamma=hparams.get("gamma", 0.5), ) elif hparams_scheduler == "multisteplr": scheduler = torch.optim.lr_scheduler.MultiStepLR( optimizer, gamma=hparams.get("gamma", 0.5), milestones=hparams.get("milestones", [500000, 1000000, 1500000]), ) else: raise ValueError("Unknown scheduler") return [optimizer], [scheduler]
29.336323
88
0.530725
aceddefe64ff0a72ff3445a7b2dc1ff371112f89
9,045
py
Python
contrib/tools/cython/Cython/Compiler/TreeFragment.py
SitdikovRustam/CatBoost
39fb9dfddb24e977ed87efc71063b03cd4bc8f16
[ "Apache-2.0" ]
652
2015-07-26T00:00:17.000Z
2022-02-24T18:30:04.000Z
contrib/tools/cython/Cython/Compiler/TreeFragment.py
dsferz/machinelearning_yandex
8fde8314c5c70299ece8b8f00075ddfcd5e07ddf
[ "Apache-2.0" ]
8
2015-09-07T03:38:19.000Z
2021-05-23T03:18:51.000Z
contrib/tools/cython/Cython/Compiler/TreeFragment.py
dsferz/machinelearning_yandex
8fde8314c5c70299ece8b8f00075ddfcd5e07ddf
[ "Apache-2.0" ]
40
2015-07-24T19:45:08.000Z
2021-11-01T14:54:56.000Z
# # TreeFragments - parsing of strings to trees # """ Support for parsing strings into code trees. """ from __future__ import absolute_import import re from io import StringIO from .Scanning import PyrexScanner, StringSourceDescriptor from .Symtab import ModuleScope from . import PyrexTypes from .Visitor import VisitorTransform from .Nodes import Node, StatListNode from .ExprNodes import NameNode from .StringEncoding import _unicode from . import Parsing from . import Main from . import UtilNodes class StringParseContext(Main.Context): def __init__(self, name, include_directories=None, compiler_directives=None): if include_directories is None: include_directories = [] if compiler_directives is None: compiler_directives = {} Main.Context.__init__(self, include_directories, compiler_directives, create_testscope=False) self.module_name = name def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True): if module_name not in (self.module_name, 'cython'): raise AssertionError("Not yet supporting any cimports/includes from string code snippets") return ModuleScope(module_name, parent_module=None, context=self) def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None, context=None, allow_struct_enum_decorator=False): """ Utility method to parse a (unicode) string of code. This is mostly used for internal Cython compiler purposes (creating code snippets that transforms should emit, as well as unit testing). code - a unicode string containing Cython (module-level) code name - a descriptive name for the code source (to use in error messages etc.) RETURNS The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is set to the scope used when parsing. """ if context is None: context = StringParseContext(name) # Since source files carry an encoding, it makes sense in this context # to use a unicode string so that code fragments don't have to bother # with encoding. This means that test code passed in should not have an # encoding header. assert isinstance(code, _unicode), "unicode code snippets only please" encoding = "UTF-8" module_name = name if initial_pos is None: initial_pos = (name, 1, 0) code_source = StringSourceDescriptor(name, code) scope = context.find_module(module_name, pos=initial_pos, need_pxd=False) buf = StringIO(code) scanner = PyrexScanner(buf, code_source, source_encoding = encoding, scope = scope, context = context, initial_pos = initial_pos) ctx = Parsing.Ctx(allow_struct_enum_decorator=allow_struct_enum_decorator) if level is None: tree = Parsing.p_module(scanner, 0, module_name, ctx=ctx) tree.scope = scope tree.is_pxd = False else: tree = Parsing.p_code(scanner, level=level, ctx=ctx) tree.scope = scope return tree class TreeCopier(VisitorTransform): def visit_Node(self, node): if node is None: return node else: c = node.clone_node() self.visitchildren(c) return c class ApplyPositionAndCopy(TreeCopier): def __init__(self, pos): super(ApplyPositionAndCopy, self).__init__() self.pos = pos def visit_Node(self, node): copy = super(ApplyPositionAndCopy, self).visit_Node(node) copy.pos = self.pos return copy class TemplateTransform(VisitorTransform): """ Makes a copy of a template tree while doing substitutions. A dictionary "substitutions" should be passed in when calling the transform; mapping names to replacement nodes. 
Then replacement happens like this: - If an ExprStatNode contains a single NameNode, whose name is a key in the substitutions dictionary, the ExprStatNode is replaced with a copy of the tree given in the dictionary. It is the responsibility of the caller that the replacement node is a valid statement. - If a single NameNode is otherwise encountered, it is replaced if its name is listed in the substitutions dictionary in the same way. It is the responsibility of the caller to make sure that the replacement nodes is a valid expression. Also a list "temps" should be passed. Any names listed will be transformed into anonymous, temporary names. Currently supported for tempnames is: NameNode (various function and class definition nodes etc. should be added to this) Each replacement node gets the position of the substituted node recursively applied to every member node. """ temp_name_counter = 0 def __call__(self, node, substitutions, temps, pos): self.substitutions = substitutions self.pos = pos tempmap = {} temphandles = [] for temp in temps: TemplateTransform.temp_name_counter += 1 handle = UtilNodes.TempHandle(PyrexTypes.py_object_type) tempmap[temp] = handle temphandles.append(handle) self.tempmap = tempmap result = super(TemplateTransform, self).__call__(node) if temps: result = UtilNodes.TempsBlockNode(self.get_pos(node), temps=temphandles, body=result) return result def get_pos(self, node): if self.pos: return self.pos else: return node.pos def visit_Node(self, node): if node is None: return None else: c = node.clone_node() if self.pos is not None: c.pos = self.pos self.visitchildren(c) return c def try_substitution(self, node, key): sub = self.substitutions.get(key) if sub is not None: pos = self.pos if pos is None: pos = node.pos return ApplyPositionAndCopy(pos)(sub) else: return self.visit_Node(node) # make copy as usual def visit_NameNode(self, node): temphandle = self.tempmap.get(node.name) if temphandle: # Replace name with temporary return temphandle.ref(self.get_pos(node)) else: return self.try_substitution(node, node.name) def visit_ExprStatNode(self, node): # If an expression-as-statement consists of only a replaceable # NameNode, we replace the entire statement, not only the NameNode if isinstance(node.expr, NameNode): return self.try_substitution(node, node.expr.name) else: return self.visit_Node(node) def copy_code_tree(node): return TreeCopier()(node) _match_indent = re.compile(u"^ *").match def strip_common_indent(lines): """Strips empty lines and common indentation from the list of strings given in lines""" # TODO: Facilitate textwrap.indent instead lines = [x for x in lines if x.strip() != u""] minindent = min([len(_match_indent(x).group(0)) for x in lines]) lines = [x[minindent:] for x in lines] return lines class TreeFragment(object): def __init__(self, code, name=None, pxds={}, temps=[], pipeline=[], level=None, initial_pos=None): if not name: name = "(tree fragment)" if isinstance(code, _unicode): def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n"))) fmt_code = fmt(code) fmt_pxds = {} for key, value in pxds.items(): fmt_pxds[key] = fmt(value) mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos) if level is None: t = t.body # Make sure a StatListNode is at the top if not isinstance(t, StatListNode): t = StatListNode(pos=mod.pos, stats=[t]) for transform in pipeline: if transform is None: continue t = transform(t) self.root = t elif isinstance(code, Node): if pxds != {}: raise NotImplementedError() self.root = code 
else: raise ValueError("Unrecognized code format (accepts unicode and Node)") self.temps = temps def copy(self): return copy_code_tree(self.root) def substitute(self, nodes={}, temps=[], pos = None): return TemplateTransform()(self.root, substitutions = nodes, temps = self.temps + temps, pos = pos) class SetPosTransform(VisitorTransform): def __init__(self, pos): super(SetPosTransform, self).__init__() self.pos = pos def visit_Node(self, node): node.pos = self.pos self.visitchildren(node) return node
34.92278
104
0.640133
aceddf83543ccaa82a9be00a7aaa6092022f100d
639
py
Python
azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/__init__.py
Christina-Kang/azure-sdk-for-python
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
[ "MIT" ]
1
2022-03-30T22:39:15.000Z
2022-03-30T22:39:15.000Z
azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/__init__.py
Christina-Kang/azure-sdk-for-python
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
[ "MIT" ]
54
2016-03-25T17:25:01.000Z
2018-10-22T17:27:54.000Z
azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/operations/__init__.py
xiafu-msft/azure-sdk-for-python
4d9560cfd519ee60667f3cc2f5295a58c18625db
[ "MIT" ]
2
2017-01-20T18:25:46.000Z
2017-05-12T21:31:47.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .disks_operations import DisksOperations from .snapshots_operations import SnapshotsOperations __all__ = [ 'DisksOperations', 'SnapshotsOperations', ]
33.631579
76
0.589984
acede05869ad00de8db2c73d19858cc0f1db6520
4,679
py
Python
build_files/buildbot/buildbot_utils.py
pragma37/Blender-OctoBuildBot
aa05883b10dc8598b36778ecb7434e0c1825d438
[ "Naumen", "Condor-1.1", "MS-PL" ]
3
2020-08-07T11:31:57.000Z
2021-07-21T01:55:56.000Z
build_files/buildbot/buildbot_utils.py
pragma37/Blender-OctoBuildBot
aa05883b10dc8598b36778ecb7434e0c1825d438
[ "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
build_files/buildbot/buildbot_utils.py
pragma37/Blender-OctoBuildBot
aa05883b10dc8598b36778ecb7434e0c1825d438
[ "Naumen", "Condor-1.1", "MS-PL" ]
2
2021-06-24T01:23:22.000Z
2021-08-07T22:03:41.000Z
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> import argparse import os import re import subprocess import sys def is_tool(name): """Check whether `name` is on PATH and marked as executable.""" # from whichcraft import which from shutil import which return which(name) is not None class Builder: def __init__(self, name, branch): self.name = name self.branch = branch self.is_release_branch = re.match("^blender-v(.*)-release$", branch) is not None # Buildbot runs from build/ directory self.blender_dir = os.path.abspath(os.path.join('..', 'blender.git')) self.build_dir = os.path.abspath(os.path.join('..', 'build')) self.install_dir = os.path.abspath(os.path.join('..', 'install')) self.upload_dir = os.path.abspath(os.path.join('..', 'install')) # Detect platform if name.startswith('mac'): self.platform = 'mac' self.command_prefix = [] elif name.startswith('linux'): self.platform = 'linux' if is_tool('scl'): self.command_prefix = ['scl', 'enable', 'devtoolset-6', '--'] else: self.command_prefix = [] elif name.startswith('win'): self.platform = 'win' self.command_prefix = [] else: raise ValueError('Unkonw platform for builder ' + self.platform) # Always 64 bit now self.bits = 64 def create_builder_from_arguments(): parser = argparse.ArgumentParser() parser.add_argument('builder_name') parser.add_argument('branch', default='master', nargs='?') args = parser.parse_args() return Builder(args.builder_name, args.branch) class VersionInfo: def __init__(self, builder): # Get version information buildinfo_h = os.path.join(builder.build_dir, "source", "creator", "buildinfo.h") blender_h = os.path.join(builder.blender_dir, "source", "blender", "blenkernel", "BKE_blender_version.h") version_number = int(self._parse_header_file(blender_h, 'BLENDER_VERSION')) version_number_patch = int(self._parse_header_file(blender_h, 'BLENDER_VERSION_PATCH')) version_numbers = (version_number // 100, version_number % 100, version_number_patch) self.short_version = "%d.%02d" % (version_numbers[0], version_numbers[1]) self.version = "%d.%02d.%d" % version_numbers self.version_cycle = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE') self.version_cycle_number = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE_NUMBER') self.hash = self._parse_header_file(buildinfo_h, 'BUILD_HASH')[1:-1] if self.version_cycle == "release": # Final release self.full_version = self.version self.is_development_build = False elif self.version_cycle == "rc": # Release candidate version_cycle = self.version_cycle + self.version_cycle_number self.full_version = self.version + version_cycle self.is_development_build = False else: # Development build self.full_version = self.version + '-' + self.hash self.is_development_build = True def _parse_header_file(self, filename, define): import re 
regex = re.compile("^#\s*define\s+%s\s+(.*)" % define) with open(filename, "r") as file: for l in file: match = regex.match(l) if match: return match.group(1) return None def call(cmd, env=None, exit_on_error=True): print(' '.join(cmd)) # Flush to ensure correct order output on Windows. sys.stdout.flush() sys.stderr.flush() retcode = subprocess.call(cmd, env=env) if exit_on_error and retcode != 0: sys.exit(retcode) return retcode
37.432
113
0.64159
acede20e051750745ef8888dcb7c0d83e2f57d13
4,314
py
Python
microsoft/testsuites/dpdk/dpdknffgo.py
cwize1/lisa
21c3be54c6464d4c0b47320987e5925db0a58b6b
[ "MIT" ]
null
null
null
microsoft/testsuites/dpdk/dpdknffgo.py
cwize1/lisa
21c3be54c6464d4c0b47320987e5925db0a58b6b
[ "MIT" ]
null
null
null
microsoft/testsuites/dpdk/dpdknffgo.py
cwize1/lisa
21c3be54c6464d4c0b47320987e5925db0a58b6b
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from typing import List, Type from lisa.executable import Tool from lisa.operating_system import Debian from lisa.tools import Echo, Git, Make, Tar, Wget from lisa.util import UnsupportedDistroException class DpdkNffGo(Tool): NFF_GO_SRC_LINK = "https://github.com/intel-go/nff-go.git" REPO_DIR = "nff-go" GO_TAR = "go1.17.6.linux-amd64.tar.gz" ubuntu_packages = [ "lua5.3-dev", "libpcap-dev", "libelf-dev", "hugepages", "libnuma-dev", "libhyperscan-dev", "liblua5.3-dev", "libmnl-dev", "libibverbs-dev", ] @property def command(self) -> str: return "nff-go" def _check_exists(self) -> bool: git_path = self.node.working_path.joinpath(self.REPO_DIR) return ( self.node.execute(f"test -a {git_path.as_posix()}", shell=True).exit_code == 0 ) @property def dependencies(self) -> List[Type[Tool]]: return [Echo, Make, Git, Tar, Wget] @property def can_install(self) -> bool: # nff-go only implemented for debain in lisav2 return isinstance(self.node.os, Debian) def _install(self) -> bool: node = self.node git = node.tools[Git] echo = node.tools[Echo] wget = node.tools[Wget] tar = node.tools[Tar] make = node.tools[Make] # grab the path, a workaround for the issue mentioned below in run_test original_path = echo.run( "$PATH", shell=True, expected_exit_code=0, expected_exit_code_failure_message="failure to grab $PATH via echo", ).stdout self.new_path = f"{original_path}:/usr/local/go/bin/" # get nff-go source and go binaries self.nff_go_path = git.clone( "https://github.com/intel-go/nff-go.git", cwd=node.working_path, dir_name=self.REPO_DIR, ) go_tar_path = wget.get( f"https://go.dev/dl/{self.GO_TAR}", file_path=str(node.working_path), filename=self.GO_TAR, ) # unpack and add to path tar.extract(go_tar_path, "/usr/local", sudo=True) # download go modules node.execute( "go mod download", cwd=self.nff_go_path, update_envs={"PATH": self.new_path}, expected_exit_code=0, expected_exit_code_failure_message=( "Could not install go modules for nff-go" ), ) # install needed libraries if isinstance(node.os, Debian): node.os.install_packages( [ "lua5.3-dev", "linux-headers-generic", "libnuma-dev", "libibverbs-dev", "libpcap-dev", "libmnl-dev", ] ) else: raise UnsupportedDistroException( node.os, "nff-go test not implemented on this OS" ) # make main project make.make( "", cwd=self.nff_go_path, update_envs={ "PATH": self.new_path, "NFF_GO_NO_BPF_SUPPORT": "1", }, ) # make dpdk components we need make.make( "", cwd=self.nff_go_path.joinpath("dpdk"), update_envs={ "NFF_GO_NO_BPF_SUPPORT": "1", }, ) return True def run_test(self) -> None: # NOTE: make.make and node.execute sudo=True shell=True # both have issues with variable expansion and update_env # This is a workaround to execute sudo with the right # variables and path, at some point if make tool and execute # are fixed we can switch back to using the make tool # make 'citesting' target self.node.execute( (f"PATH={self.new_path} " "NFF_GO_NO_BPF_SUPPORT=1 make citesting"), sudo=True, shell=True, cwd=self.nff_go_path, expected_exit_code=0, expected_exit_code_failure_message="NFF-GO tests failed", )
30.167832
85
0.546361
acede3f75a1d59be7106628897ab16b12460e1e7
485
py
Python
__pythonNewLine.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
__pythonNewLine.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
__pythonNewLine.py
simdevex/01.Basics
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
[ "MIT" ]
null
null
null
'''A Python program to print without newline or space.'''
import functools


def printSolutionOne():
    for i in range(0, 10):
        print('*', end="")


def printSolutionTwo():
    printf = functools.partial(print, end="")
    for i in range(0, 10):
        printf('*')


def printSolutionThree():
    i = 0
    while i < 10:
        print('*', end='')
        i = i + 1


def main():
    printSolutionOne()
    # printSolutionTwo()
    # printSolutionThree()


main()
19.4
57
0.560825
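The small script above prints ten asterisks without newlines or spaces; two equally valid plain-Python alternatives (no extra assumptions) are shown below for comparison.

import sys

print('*' * 10, end='')     # build the whole string first, then suppress the newline
sys.stdout.write('*' * 10)  # write straight to stdout; write() never appends a newline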
acede47021d02e1ddbdd303fa767ac5d2e87434b
1,123
py
Python
tutorial/spiders/cs_spider_ex3.py
Lucklyric/W17MM802-Scrapy-Toturial
f02deba75b2ddb4be05eaf4defb13b74158fd576
[ "MIT" ]
null
null
null
tutorial/spiders/cs_spider_ex3.py
Lucklyric/W17MM802-Scrapy-Toturial
f02deba75b2ddb4be05eaf4defb13b74158fd576
[ "MIT" ]
null
null
null
tutorial/spiders/cs_spider_ex3.py
Lucklyric/W17MM802-Scrapy-Toturial
f02deba75b2ddb4be05eaf4defb13b74158fd576
[ "MIT" ]
null
null
null
import scrapy from tutorial.items import CSStaff class CsSpider_ex3(scrapy.Spider): name = "cs_ex3" def start_requests(self): urls = [ "https://www.ualberta.ca/computing-science/faculty-and-staff/faculty", ] for url in urls: yield scrapy.Request(url=url, callback=self.parse) def parse(self, response): for person in response.xpath('//tbody/tr'): cs_staff_item = CSStaff() rows = person.xpath('./td') first_row = rows[0].xpath('./a/text()').extract() cs_staff_item['name']= first_row[0] cs_staff_item['position']= first_row[1] cs_staff_item['department']= rows[1].xpath('./text()').extract_first() area = rows[2].xpath('./text()').extract_first() cs_staff_item['area']= area if area is not None else "N/A" phone = rows[3].xpath('./text()').extract_first() cs_staff_item['phone'] = phone if phone is not None else "N/A" cs_staff_item['email'] = rows[3].xpath('./a/text()').extract_first() yield cs_staff_item
40.107143
82
0.58415
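To run the spider above without the scrapy command line, a small driver script along these lines would work. It is a sketch with assumptions: it must be executed from the root of the `tutorial` Scrapy project so that `tutorial.items.CSStaff` resolves, the output file name `staff.json` is just an example, and the FEEDS setting requires a reasonably recent Scrapy (2.1+).

from scrapy.crawler import CrawlerProcess
from tutorial.spiders.cs_spider_ex3 import CsSpider_ex3

process = CrawlerProcess(settings={"FEEDS": {"staff.json": {"format": "json"}}})
process.crawl(CsSpider_ex3)  # register the spider class defined above
process.start()              # blocks until the crawl has finished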
acede60ea4a7f682669d5e6a4e91a2f3ce03d3bf
126
py
Python
leonardo/views/__init__.py
themage/leonardo
60ff53ec30433cf21dacbb1f8c8f7676c6e412f6
[ "Apache-2.0" ]
null
null
null
leonardo/views/__init__.py
themage/leonardo
60ff53ec30433cf21dacbb1f8c8f7676c6e412f6
[ "Apache-2.0" ]
null
null
null
leonardo/views/__init__.py
themage/leonardo
60ff53ec30433cf21dacbb1f8c8f7676c6e412f6
[ "Apache-2.0" ]
null
null
null
from . import frontend from . import api from . import errors from . import search from . import multiple from . import proxy
18
22
0.761905
acede644c05059dcfb29deb55ae8e049242326a1
52,996
py
Python
HintList.py
ThomasJRyan/OoT-Randomizer
65bd46771359edaf64c965e6129a59e9c0e0f806
[ "MIT" ]
null
null
null
HintList.py
ThomasJRyan/OoT-Randomizer
65bd46771359edaf64c965e6129a59e9c0e0f806
[ "MIT" ]
null
null
null
HintList.py
ThomasJRyan/OoT-Randomizer
65bd46771359edaf64c965e6129a59e9c0e0f806
[ "MIT" ]
null
null
null
import random class Hint(object): name = "" text = "" type = [] def __init__(self, name, text, type, choice=None): self.name = name self.type = [type] if not isinstance(type, list) else type if isinstance(text, str): self.text = text else: if choice == None: self.text = random.choice(text) else: self.text = text[choice] def getHint(name, clearer_hint=False): textOptions, clearText, type = hintTable[name] if clearer_hint: if clearText == None: return Hint(name, textOptions, type, 0) return Hint(name, clearText, type) else: return Hint(name, textOptions, type) def getHintGroup(group, world): ret = [] for name in hintTable: # Some hints have a confusing text in the scope of grotto entrance shuffle so we exclude them if world.shuffle_grotto_entrances: if name == 'GS Hyrule Castle Grotto' or name == 'GS Hyrule Field Near Gerudo Valley': continue hint = getHint(name, world.clearer_hints) if hint.name in world.always_hints: hint.type = 'always' if group in hint.type and not (name in hintExclusions(world)): ret.append(hint) return ret def getRequiredHints(world): ret = [] for name in hintTable: hint = getHint(name) if 'always' in hint.type or hint.name in conditional_always and conditional_always[hint.name](world): ret.append(hint) return ret # Hints required under certain settings conditional_always = { '10 Big Poes': lambda world: world.big_poe_count > 3, 'Deku Theater Skull Mask': lambda world: world.hint_dist == 'tournament', 'Song from Ocarina of Time': lambda world: world.bridge not in ('stones', 'dungeons') and world.shuffle_ganon_bosskey not in ('lacs_stones', 'lacs_dungeons'), 'Ocarina of Time': lambda world: world.bridge not in ('stones', 'dungeons') and world.shuffle_ganon_bosskey not in ('lacs_stones', 'lacs_dungeons'), 'Sheik in Kakariko': lambda world: world.bridge not in ('medallions', 'dungeons') and world.shuffle_ganon_bosskey not in ('lacs_medallions', 'lacs_dungeons'), 'Biggoron': lambda world: world.logic_earliest_adult_trade != 'claim_check' or world.logic_latest_adult_trade != 'claim_check', '50 Gold Skulltula Reward': lambda world: world.bridge != 'tokens' or world.bridge_tokens < 50, '40 Gold Skulltula Reward': lambda world: world.bridge != 'tokens' or world.bridge_tokens < 40, '30 Gold Skulltula Reward': lambda world: world.bridge != 'tokens' or world.bridge_tokens < 30, } # table of hints, format is (name, hint text, clear hint text, type of hint) there are special characters that are read for certain in game commands: # ^ is a box break # & is a new line # @ will print the player name # # sets color to white (currently only used for dungeon reward hints). hintTable = { 'Triforce Piece': (["a triumph fork", "cheese", "a gold fragment"], "a Piece of the Triforce", "item"), 'Magic Meter': (["mystic training", "pixie dust", "a green rectangle"], "a Magic Meter", 'item'), 'Double Defense': (["a white outline", "damage decrease", "strengthened love"], "Double Defense", 'item'), 'Slingshot': (["a seed shooter", "a rubberband", "a child's catapult"], "a Slingshot", 'item'), 'Boomerang': (["a banana", "a stun stick"], "the Boomerang", 'item'), 'Bow': (["an archery enabler", "a danger dart launcher"], "a Bow", 'item'), 'Bomb Bag': (["an explosive container", "a blast bag"], "a Bomb Bag", 'item'), 'Progressive Hookshot': (["Dampe's keepsake", "the Grapple Beam", "the BOING! 
chain"], "a Hookshot", 'item'), 'Progressive Strength Upgrade': (["power gloves", "metal mittens", "the heavy lifty"], "a Strength Upgrade", 'item'), 'Progressive Scale': (["a deeper dive", "a piece of Zora"], "a Zora Scale", 'item'), 'Hammer': (["the dragon smasher", "the metal mallet", "the heavy hitter"], "the Megaton Hammer", 'item'), 'Iron Boots': (["sink shoes", "clank cleats"], "the Iron Boots", 'item'), 'Hover Boots': (["butter boots", "sacred slippers", "spacewalkers"], "the Hover Boots", 'item'), 'Kokiri Sword': (["a butter knife", "a starter slasher", "a switchblade"], "the Kokiri Sword", 'item'), 'Biggoron Sword': (["the biggest blade", "a colossal cleaver"], "the Biggoron Sword", 'item'), 'Master Sword': (["evil's bane"], "the Master Sword", 'item'), 'Deku Shield': (["a wooden ward", "a burnable barrier"], "a Deku Shield", 'item'), 'Hylian Shield': (["a steel safeguard", "Like Like's metal meal"], "a Hylian Shield", 'item'), 'Mirror Shield': (["the reflective rampart", "Medusa's weakness", "a silvered surface"], "the Mirror Shield", 'item'), 'Farores Wind': (["teleportation", "a relocation rune", "a green ball", "a green gust"], "Farore's Wind", 'item'), 'Nayrus Love': (["a safe space", "an impregnable aura", "a blue barrier", "a blue crystal"], "Nayru's Love", 'item'), 'Dins Fire': (["an inferno", "a heat wave", "a red ball"], "Din's Fire", 'item'), 'Fire Arrows': (["the furnace firearm", "the burning bolts", "a magma missile"], "the Fire Arrows", 'item'), 'Ice Arrows': (["the refrigerator rocket", "the frostbite bolts", "an iceberg maker"], "the Ice Arrows", 'item'), 'Light Arrows': (["the shining shot", "the luminous launcher", "Ganondorf's bane", "the lighting bolts"], "the Light Arrows", 'item'), 'Lens of Truth': (["a lie detector", "a ghost tracker", "true sight", "a detective's tool"], "the Lens of Truth", 'item'), 'Ocarina': (["a flute", "a music maker"], "an Ocarina", 'item'), 'Goron Tunic': (["ruby robes", "fireproof fabric", "cooking clothes"], "a Goron Tunic", 'item'), 'Zora Tunic': (["a sapphire suit", "scuba gear", "a swimsuit"], "a Zora Tunic", 'item'), 'Epona': (["a horse", "a four legged friend"], "Epona", 'item'), 'Zeldas Lullaby': (["a song of royal slumber", "a triforce tune"], "Zelda's Lullaby", 'item'), 'Eponas Song': (["an equestrian etude", "Malon's melody", "a ranch song"], "Epona's Song", 'item'), 'Sarias Song': (["a song of dancing Gorons", "Saria's phone number"], "Saria's Song", 'item'), 'Suns Song': (["Sunny Day", "the ReDead's bane", "the Gibdo's bane"], "the Sun's Song", 'item'), 'Song of Time': (["a song 7 years long", "the tune of ages"], "the Song of Time", 'item'), 'Song of Storms': (["Rain Dance", "a thunderstorm tune", "windmill acceleration"], "the Song of Storms", 'item'), 'Minuet of Forest': (["the song of tall trees", "an arboreal anthem", "a green spark trail"], "the Minuet of Forest", 'item'), 'Bolero of Fire': (["a song of lethal lava", "a red spark trail", "a volcanic verse"], "the Bolero of Fire", 'item'), 'Serenade of Water': (["a song of a damp ditch", "a blue spark trail", "the lake's lyric"], "the Serenade of Water", 'item'), 'Requiem of Spirit': (["a song of sandy statues", "an orange spark trail", "the desert ditty"], "the Requiem of Spirit", 'item'), 'Nocturne of Shadow': (["a song of spooky spirits", "a graveyard boogie", "a haunted hymn", "a purple spark trail"], "the Nocturne of Shadow", 'item'), 'Prelude of Light': (["a luminous prologue melody", "a yellow spark trail", "the temple traveler"], "the Prelude of Light", 
'item'), 'Bottle': (["a glass container", "an empty jar", "encased air"], "a Bottle", 'item'), 'Bottle with Letter': (["a call for help", "the note that Mweeps", "an SOS call", "a fishy stationery"], "Ruto's Letter", 'item'), 'Bottle with Milk': (["cow juice", "a white liquid", "a baby's breakfast"], "a Milk Bottle", 'item'), 'Bottle with Red Potion': (["a vitality vial", "a red liquid"], "a Red Potion Bottle", 'item'), 'Bottle with Green Potion': (["a magic mixture", "a green liquid"], "a Green Potion Bottle", 'item'), 'Bottle with Blue Potion': (["an ailment antidote", "a blue liquid"], "a Blue Potion Bottle", 'item'), 'Bottle with Fairy': (["an imprisoned fairy", "an extra life", "Navi's cousin"], "a Fairy Bottle", 'item'), 'Bottle with Fish': (["an aquarium", "a deity's snack"], "a Fish Bottle", 'item'), 'Bottle with Blue Fire': (["a conflagration canteen", "an icemelt jar"], "a Blue Fire Bottle", 'item'), 'Bottle with Bugs': (["an insectarium", "Skulltula finders"], "a Bug Bottle", 'item'), 'Bottle with Poe': (["a spooky ghost", "a face in the jar"], "a Poe Bottle", 'item'), 'Bottle with Big Poe': (["the spookiest ghost", "a sidequest spirit"], "a Big Poe Bottle", 'item'), 'Stone of Agony': (["the shake stone", "the Rumble Pak (TM)"], "the Stone of Agony", 'item'), 'Gerudo Membership Card': (["a girl club membership", "a desert tribe's pass"], "the Gerudo Card", 'item'), 'Progressive Wallet': (["a mo' money holder", "a gem purse", "a portable bank"], "a Wallet", 'item'), 'Deku Stick Capacity': (["a lumber rack", "more flammable twigs"], "Deku Stick Capacity", 'item'), 'Deku Nut Capacity': (["more nuts", "flashbang storage"], "Deku Nut Capacity", 'item'), 'Heart Container': (["a lot of love", "a Valentine's gift", "a boss's organ"], "a Heart Container", 'item'), 'Piece of Heart': (["a little love", "a broken heart"], "a Piece of Heart", 'item'), 'Piece of Heart (Treasure Chest Game)': ("a victory valentine", "a Piece of Heart", 'item'), 'Recovery Heart': (["a free heal", "a hearty meal", "a Band-Aid"], "a Recovery Heart", 'item'), 'Rupee (Treasure Chest Game)': ("the dollar of defeat", 'a Green Rupee', 'item'), 'Deku Stick (1)': ("a breakable branch", 'a Deku Stick', 'item'), 'Rupee (1)': (["a unique coin", "a penny", "a green gem"], "a Green Rupee", 'item'), 'Rupees (5)': (["a common coin", "a blue gem"], "a Blue Rupee", 'item'), 'Rupees (20)': (["couch cash", "a red gem"], "a Red Rupee", 'item'), 'Rupees (50)': (["big bucks", "a purple gem", "wealth"], "a Purple Rupee", 'item'), 'Rupees (200)': (["a juicy jackpot", "a yellow gem", "a giant gem", "great wealth"], "a Huge Rupee", 'item'), 'Weird Egg': (["a chicken dilemma"], "the Weird Egg", 'item'), 'Zeldas Letter': (["an autograph", "royal stationery", "royal snail mail"], "Zelda's Letter", 'item'), 'Pocket Egg': (["a Cucco container", "a Cucco, eventually", "a fowl youth"], "the Pocket Egg", 'item'), 'Pocket Cucco': (["a little clucker"], "the Pocket Cucco", 'item'), 'Cojiro': (["a cerulean capon"], "Cojiro", 'item'), 'Odd Mushroom': (["a powder ingredient"], "an Odd Mushroom", 'item'), 'Odd Potion': (["Granny's goodies"], "an Odd Potion", 'item'), 'Poachers Saw': (["a tree killer"], "the Poacher's Saw", 'item'), 'Broken Sword': (["a shattered slicer"], "the Broken Sword", 'item'), 'Prescription': (["a pill pamphlet", "a doctor's note"], "the Prescription", 'item'), 'Eyeball Frog': (["a perceiving polliwog"], "the Eyeball Frog", 'item'), 'Eyedrops': (["a vision vial"], "the Eyedrops", 'item'), 'Claim Check': (["a three day wait"], 
"the Claim Check", 'item'), 'Map': (["a dungeon atlas", "blueprints"], "a Map", 'item'), 'Compass': (["a treasure tracker", "a magnetic needle"], "a Compass", 'item'), 'BossKey': (["a master of unlocking", "a dungeon's master pass"], "a Boss Key", 'item'), 'SmallKey': (["a tool for unlocking", "a dungeon pass", "a lock remover", "a lockpick"], "a Small Key", 'item'), 'FortressSmallKey': (["a get out of jail free card"], "a Jail Key", 'item'), 'KeyError': (["something mysterious", "an unknown treasure"], "An Error (Please Report This)", 'item'), 'Arrows (5)': (["a few danger darts", "a few sharp shafts"], "Arrows (5 pieces)", 'item'), 'Arrows (10)': (["some danger darts", "some sharp shafts"], "Arrows (10 pieces)", 'item'), 'Arrows (30)': (["plenty of danger darts", "plenty of sharp shafts"], "Arrows (30 pieces)", 'item'), 'Bombs (5)': (["a few explosives", "a few blast balls"], "Bombs (5 pieces)", 'item'), 'Bombs (10)': (["some explosives", "some blast balls"], "Bombs (10 pieces)", 'item'), 'Bombs (20)': (["lots-o-explosives", "plenty of blast balls"], "Bombs (20 pieces)", 'item'), 'Ice Trap': (["a gift from Ganon", "a chilling discovery", "frosty fun"], "an Ice Trap", 'item'), 'Magic Bean': (["a wizardly legume"], "a Magic Bean", 'item'), 'Magic Bean Pack': (["wizardly legumes"], "Magic Beans", 'item'), 'Bombchus': (["mice bombs", "proximity mice", "wall crawlers", "trail blazers"], "Bombchus", 'item'), 'Bombchus (5)': (["a few mice bombs", "a few proximity mice", "a few wall crawlers", "a few trail blazers"], "Bombchus (5 pieces)", 'item'), 'Bombchus (10)': (["some mice bombs", "some proximity mice", "some wall crawlers", "some trail blazers"], "Bombchus (10 pieces)", 'item'), 'Bombchus (20)': (["plenty of mice bombs", "plenty of proximity mice", "plenty of wall crawlers", "plenty of trail blazers"], "Bombchus (20 pieces)", 'item'), 'Deku Nuts (5)': (["some nuts", "some flashbangs", "some scrub spit"], "Deku Nuts (5 pieces)", 'item'), 'Deku Nuts (10)': (["lots-o-nuts", "plenty of flashbangs", "plenty of scrub spit"], "Deku Nuts (10 pieces)", 'item'), 'Deku Seeds (30)': (["catapult ammo", "lots-o-seeds"], "Deku Seeds (30 pieces)", 'item'), 'Gold Skulltula Token': (["proof of destruction", "an arachnid chip", "spider remains", "one percent of a curse"], "a Gold Skulltula Token", 'item'), 'Deku Theater Mask of Truth': ("the #Mask of Truth# yields", None, 'always'), 'Frog Ocarina Game': (["an #amphibian feast# yields", "the #croaking choir's magnum opus# awards", "the #froggy finale# yields"], "the final reward from the #Frogs of Zora's River# is", 'always'), 'Song from Ocarina of Time': ("the #Ocarina of Time# teaches", None, ['song', 'sometimes']), 'Song from Composer Grave': (["in the #Composers' Grave#, ReDead guard", "the #Composer Brothers# wrote"], None, ['song', 'sometimes']), 'Sheik Forest Song': ("deep in #the forest# Sheik teaches", None, ['song', 'sometimes']), 'Sheik at Temple': ("Sheik waits at a #monument to time# to teach", None, ['song', 'sometimes']), 'Sheik in Crater': ("the #crater's melody# is", None, ['song', 'sometimes']), 'Sheik in Ice Cavern': ("the #frozen cavern# echoes with", None, ['song', 'sometimes']), 'Sheik in Kakariko': ("a #ravaged village# mourns with", None, ['song', 'sometimes']), 'Sheik at Colossus': ("a hero ventures beyond #the Wasteland# to learn", None, ['song', 'sometimes']), 'Child Fishing': ("#fishing in youth# bestows", None, 'minigame'), 'Adult Fishing': ("#fishing in maturity# bestows", None, 'minigame'), 'Child Shooting Gallery': ("#shooting 
in youth# grants", None, 'minigame'), 'Adult Shooting Gallery': ("#shooting in maturity# grants", None, ['minigame', 'sometimes']), 'Bombchu Bowling Bomb Bag': ("the #first explosive prize# is", None, 'minigame'), 'Bombchu Bowling Piece of Heart': ("the #second explosive prize# is", None, 'minigame'), 'Treasure Chest Game': (["#gambling# grants", "there is a #1/32 chance# to win"], "the #treasure chest game# grants", ['minigame', 'sometimes']), 'Horseback Archery 1500 Points': ("mastery of #horseback archery# grants", "scoring 1500 in #horseback archery# grants", ['minigame', 'sometimes']), 'Links House Cow': ("the #bovine bounty of a horseback hustle# gifts", None, ['minigame', 'sometimes']), '10 Big Poes': (["#Big Poes# leads to", "#ghost hunters# will be rewarded with"], None, ['overworld', 'sometimes']), 'Deku Theater Skull Mask': ("the #Skull Mask# yields", None, ['overworld', 'sometimes']), 'Ocarina of Time': ("the #treasure thrown by Princess Zelda# is", None, ['overworld', 'sometimes']), 'Biggoron': ("#Biggoron# crafts", None, ['overworld', 'sometimes']), '50 Gold Skulltula Reward': ("slaying #50 Gold Skulltulas# reveals", None, ['overworld', 'sometimes']), '40 Gold Skulltula Reward': ("slaying #40 Gold Skulltulas# reveals", None, ['overworld', 'sometimes']), '30 Gold Skulltula Reward': ("slaying #30 Gold Skulltulas# reveals", None, ['overworld', 'sometimes']), '20 Gold Skulltula Reward': ("slaying #20 Gold Skulltulas# reveals", None, ['overworld', 'sometimes']), 'Anjus Chickens': ("#collecting cuccos# rewards", None, 'sometimes'), 'Darunias Joy': ("#Darunia's dance# leads to", None, ['overworld', 'sometimes']), 'Skull Kid': ("the #Skull Kid# grants", None, ['overworld', 'sometimes']), 'Lake Hylia Sun': ("staring into #the sun# grants", "shooting #the sun# grants", ['overworld', 'sometimes']), 'Heart Piece Grave Chest': ("playing #Sun's Song# in a grave spawns", None, ['overworld', 'sometimes']), 'Goron City Leftmost Maze Chest': ("in #Goron City# the hammer unlocks", None, ['overworld', 'sometimes']), 'Gerudo Valley Hammer Rocks Chest': ("in #Gerudo Valley# the hammer unlocks", None, ['overworld', 'sometimes']), 'GS Hyrule Castle Grotto': ("a #storm near the castle# reveals", None, ['overworld', 'sometimes']), 'GS Hyrule Field Near Gerudo Valley': ("buried near #the valley# a spider holds", None, ['overworld', 'sometimes']), 'GS Zora\'s Fountain Hidden Cave': ("a spider high above the #icy waters# holds", None, ['overworld', 'sometimes']), 'Haunted Wasteland Structure Chest': (["deep in the #Wasteland# is", "beneath #the sands#, flames reveal"], None, ['overworld', 'sometimes']), 'GS Wasteland Ruins': ("a #spider in the Wasteland# hold", None, ['overworld', 'sometimes']), 'Composer Grave Chest': (["in the #Composers' Grave#, darkness hides", "the #Composer Brothers# hid"], None, ['overworld', 'sometimes']), 'Zoras Fountain Bottom Freestanding PoH': ("under the #icy waters# lies", None, ['overworld', 'sometimes']), 'Goron City Pot Freestanding PoH': ("spinning #Goron pottery# contains", None, ['overworld', 'sometimes']), 'King Zora Thawed': ("unfreezing #King Zora# grants", None, ['overworld', 'sometimes']), 'DMC Deku Scrub Bombs': ("in the Crater a #scrub# sells", None, ['overworld', 'sometimes']), 'Deku Tree MQ After Spinning Log Chest': ("within #a tree#, a temporal stone contains", None, ['dungeon', 'sometimes']), 'GS Deku Tree MQ Basement Ceiling': ("within #a tree#, a spider on the ceiling holds", None, ['dungeon', 'sometimes']), 'Boomerang Chest': ("in the #belly of a 
deity#, a school of stingers guard", None, 'sometimes'), 'GS Jabu Jabu MQ Invisible Enemies Room': ("in the #belly of a deity#, a spider surrounded by shadows holds", None, ['dungeon', 'sometimes']), 'Forest Temple Floormaster Chest': ("deep in #the forest#, shadows guard a chest containing", "a Floormaster in #Forest Temple# guards", ['dungeon', 'sometimes']), 'Fire Temple Scarecrow Chest': ("high in the #Fire Temple#, Pierre hid", None, ['dungeon', 'sometimes']), 'Fire Temple Megaton Hammer Chest': ("high in the #Fire Temple#, Flare Dancers hid", None, ['dungeon', 'sometimes']), 'Fire Temple MQ West Tower Top Chest': ("high in the #Fire Temple#, Flare Dancers hid", None, ['dungeon', 'sometimes']), 'GS Fire Temple MQ Above Fire Wall Maze': ("high in the #Fire Temple#, a spider holds", None, ['dungeon', 'sometimes']), 'Water Temple River Chest': ("deep under #the lake#, beyond the currents, hides", "the #Water Temple River Chest# holds", ['dungeon', 'sometimes']), 'Water Temple Boss Key Chest': ("deep under #the lake#, the gilded chest contains", "the #Water Temple Gilded Chest# holds", ['dungeon', 'sometimes']), 'Water Temple MQ Boss Key Chest': ("deep under #the lake#, the gilded chest contains", "the #Water Temple Gilded Chest# holds", ['dungeon', 'sometimes']), 'Water Temple MQ Freestanding Key': ("deep under #the lake#, the apparent key is really", None, ['dungeon', 'sometimes']), 'GS Water Temple MQ North Basement': ("deep under #the lake#, the locked spider holds", None, ['dungeon', 'sometimes']), 'Gerudo Training Grounds Underwater Silver Rupee Chest': ("those who seek #sunken silver rupees# will find", None, ['dungeon', 'sometimes']), 'Gerudo Training Grounds MQ Underwater Silver Rupee Chest': ("those who seek #sunken silver rupees# will find", None, ['dungeon', 'sometimes']), 'Gerudo Training Grounds Maze Path Final Chest': ("the final prize of #the thieves\' training# is", None, ['dungeon', 'sometimes']), 'Gerudo Training Grounds MQ Ice Arrows Chest': ("the final prize of #the thieves\' training# is", None, ['dungeon', 'sometimes']), 'Bottom of the Well Defeat Boss': ("#Dead Hand# holds", "#Dead Hand# in the well holds", ['dungeon', 'sometimes']), 'Bottom of the Well MQ Compass Chest': ("#Dead Hand# holds", "#Dead Hand# in the well holds", ['dungeon', 'sometimes']), 'Silver Gauntlets Chest': ("upon the #Colossus's right hand# is", None, ['dungeon', 'sometimes']), 'Mirror Shield Chest': ("upon the #Colossus's left hand# is", None, ['dungeon', 'sometimes']), 'Spirit Temple MQ Child Center Chest': ("within #the Colossus# a temporal paradox yields", None, ['dungeon', 'sometimes']), 'Spirit Temple MQ Lower Adult Right Chest': ("within #the Colossus# a symphony yields", None, ['dungeon', 'sometimes']), 'GS Spirit Temple MQ Lower Adult Right': ("within #the Colossus# a spider's symphony yields", None, ['dungeon', 'sometimes']), 'Shadow Temple Hidden Floormaster Chest': (["shadows in an #invisible maze# guard", "after a free #boat ride# comes"], None, ['dungeon', 'sometimes']), 'Shadow Temple MQ Bomb Flower Chest': (["shadows in an #invisible maze# guard", "after a free #boat ride# comes"], None, ['dungeon', 'sometimes']), 'Desert Colossus -> Desert Colossus Grotto': ("lifting a rock in #the desert# reveals", None, 'entrance'), 'Gerudo Valley Grotto Ledge -> Gerudo Valley Octorok Grotto':("on #a ledge in the valley#, a silver rock hides", None, 'entrance'), 'Goron City Grotto Platform -> Goron City Grotto': ("a #pool of lava# in Goron City blocks the way to", None, 'entrance'), 'Gerudo 
Fortress -> Gerudo Fortress Storms Grotto': ("a #storm within Gerudo's Fortress# reveals", None, 'entrance'), 'Zoras Domain -> Zoras Domain Storms Grotto': ("a #storm within Zora's Domain# reveals", None, 'entrance'), 'Hyrule Castle Grounds -> Castle Storms Grotto': ("a #storm near the castle# reveals", None, 'entrance'), 'Desert Colossus -> Colossus Fairy': ("a fractured wall #in the desert# hides", None, 'entrance'), 'Ganons Castle Grounds -> Ganons Castle Fairy': ("a heavy pillar #outside the castle# obstructs", None, 'entrance'), 'Death Mountain Crater Lower Nearby -> Crater Fairy': ("using a hammer #in the Crater# opens the path to", None, 'entrance'), 'Zoras Fountain -> Zoras Fountain Fairy': ("a particular wall in #Zora's Fountain# hides", None, 'entrance'), 'Gerudo Valley Far Side -> Carpenter Tent': ("a #tent in the valley# covers", None, 'entrance'), 'Shadow Temple Warp Region -> Shadow Temple Entryway': ("at the back of #the Graveyard#, there is", None, 'entrance'), 'Lake Hylia -> Water Temple Lobby': ("deep #under a vast lake#, one can find", None, 'entrance'), 'Forest Temple Entrance Ledge -> Forest Temple Lobby': ("deep #within the Meadow#, one can find", None, 'entrance'), 'Gerudo Fortress -> Gerudo Training Grounds Lobby': ("paying a fee #within Gerudo's Fortress# grants access to", None, 'entrance'), 'Zoras Fountain Ice Ledge -> Ice Cavern Beginning': ("in #a frozen fountain#, an opening leads to", None, 'entrance'), 'Zoras Fountain -> Jabu Jabus Belly Beginning': ("inside #Jabu Jabu#, one can find", None, 'entrance'), 'Links House': ("Link's House", None, 'region'), 'Temple of Time': ("Temple of Time", None, 'region'), 'Mido House': ("Mido's house", None, 'region'), 'Saria House': ("Saria's House", None, 'region'), 'House of Twins': ("the #House of Twins#", None, 'region'), 'Know It All House': ("Know-It-All Brothers' House", None, 'region'), 'Kokiri Shop': ("the #Kokiri Shop#", None, 'region'), 'Lake Hylia Lab': ("the #Lakeside Laboratory#", None, 'region'), 'Fishing Hole': ("the #Fishing Pond#", None, 'region'), 'Carpenter Tent': ("Carpenters' tent", None, 'region'), 'Castle Town Rupee Room': ("the #Guard House#", None, 'region'), 'Castle Town Mask Shop': ("the #Happy Mask Shop#", None, 'region'), 'Castle Town Bombchu Bowling': ("the #Bombchu Bowling#", None, 'region'), 'Castle Town Potion Shop': ("the #Market Potion Shop#", None, 'region'), 'Castle Town Treasure Chest Game': ("the #Treasure Chest Game#", None, 'region'), 'Castle Town Bombchu Shop': ("the #Bombchu Shop#", None, 'region'), 'Castle Town Man in Green House': ("Man in Green's House", None, 'region'), 'Windmill': ("the #Windmill#", None, 'region'), 'Carpenter Boss House': ("the #Carpenters' Boss House#", None, 'region'), 'House of Skulltula': ("the #House of Skulltulas#", None, 'region'), 'Impas House': ("Impa's House", None, 'region'), 'Impas House Back': ("Impa's cow cage", None, 'region'), 'Odd Medicine Building': ("Granny's Potion Shop", None, 'region'), 'Dampes House': ("Dampe's Hut", None, 'region'), 'Goron Shop': ("the #Goron Shop#", None, 'region'), 'Zora Shop': ("the #Zora Shop#", None, 'region'), 'Talon House': ("Talon's House", None, 'region'), 'Ingo Barn': ("a #stable#", None, 'region'), 'Lon Lon Corner Tower': ("the #Lon Lon Tower#", None, 'region'), 'Castle Town Bazaar': ("the #Market Bazaar#", None, 'region'), 'Castle Town Shooting Gallery': ("a #Slingshot Shooting Gallery#", None, 'region'), 'Kakariko Bazaar': ("the #Kakariko Bazaar#", None, 'region'), 'Kakariko Potion Shop Front': ("the 
#Kakariko Potion Shop#", None, 'region'), 'Kakariko Potion Shop Back': ("the #Kakariko Potion Shop#", None, 'region'), 'Kakariko Shooting Gallery': ("a #Bow Shooting Gallery#", None, 'region'), 'Colossus Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Hyrule Castle Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Ganons Castle Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Crater Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Mountain Summit Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Zoras Fountain Fairy': ("a #Great Fairy Fountain#", None, 'region'), 'Shield Grave': ("a #grave with a free chest#", None, 'region'), 'Heart Piece Grave': ("a chest spawned by #Sun's Song#", None, 'region'), 'Composer Grave': ("the #Composers' Grave#", None, 'region'), 'Dampes Grave': ("Dampe's Grave", None, 'region'), 'Mountain Bombable Grotto': ("a solitary #Cow#", None, 'region'), 'Castle Storms Grotto': ("a sandy grotto with #fragile walls#", None, 'region'), 'Field North Lon Lon Grotto': ("a pool guarded by a #Tektite#", None, 'region'), 'Field Kakariko Grotto': ("a #Big Skulltula# guarding a Gold one", None, 'region'), 'Field Valley Grotto': ("a grotto full of #spider webs#", None, 'region'), 'Kakariko Bombable Grotto': ("#ReDeads# guarding a chest", None, 'region'), 'Front of Meadow Grotto': ("#Wolfos# guarding a chest", None, 'region'), 'Gerudo Valley Octorok Grotto': ("an #Octorok# guarding a rich pool", None, 'region'), 'Deku Theater': ("the #Lost Woods Stage#", None, 'region'), 'Zora River Plateau Open Grotto': ("a #generic grotto#", None, 'region'), 'Top of Crater Grotto': ("a #generic grotto#", None, 'region'), 'Mountain Storms Grotto': ("a #generic grotto#", None, 'region'), 'Kakariko Back Grotto': ("a #generic grotto#", None, 'region'), 'Field West Castle Town Grotto': ("a #generic grotto#", None, 'region'), 'Field Near Lake Outside Fence Grotto': ("a #generic grotto#", None, 'region'), 'Remote Southern Grotto': ("a #generic grotto#", None, 'region'), 'Kokiri Forest Storms Grotto': ("a #generic grotto#", None, 'region'), 'Lost Woods Generic Grotto': ("a #generic grotto#", None, 'region'), 'Field Near Lake Inside Fence Grotto': ("a #single Upgrade Deku Scrub#", None, 'region'), 'Lost Woods Sales Grotto': ("#2 Deku Scrubs# including an Upgrade one", None, 'region'), 'Desert Colossus Grotto': ("2 Deku Scrubs", None, 'region'), 'Zora River Storms Grotto': ("2 Deku Scrubs", None, 'region'), 'Meadow Storms Grotto': ("2 Deku Scrubs", None, 'region'), 'Gerudo Valley Storms Grotto': ("2 Deku Scrubs", None, 'region'), 'Lake Hylia Grotto': ("3 Deku Scrubs", None, 'region'), 'DMC Hammer Grotto': ("3 Deku Scrubs", None, 'region'), 'Goron City Grotto': ("3 Deku Scrubs", None, 'region'), 'Lon Lon Grotto': ("3 Deku Scrubs", None, 'region'), 'Zora River Plateau Bombable Grotto': ("a small #Fairy Fountain#", None, 'region'), 'Field Far West Castle Town Grotto': ("a small #Fairy Fountain#", None, 'region'), 'Meadow Fairy Grotto': ("a small #Fairy Fountain#", None, 'region'), 'Zoras Domain Storms Grotto': ("a small #Fairy Fountain#", None, 'region'), 'Gerudo Fortress Storms Grotto': ("a small #Fairy Fountain#", None, 'region'), '1001': ("Ganondorf 2020!", None, 'junk'), '1002': ("They say that monarchy is a terrible system of governance.", None, 'junk'), '1003': ("They say that Zelda is a poor leader.", None, 'junk'), '1004': ("These hints can be quite useful. 
This is an exception.", None, 'junk'), '1006': ("They say that all the Zora drowned in Wind Waker.", None, 'junk'), '1007': ("They say that PJ64 is a terrible emulator.", None, 'junk'), '1008': ("'Member when Ganon was a blue pig?^I 'member.", None, 'junk'), '1009': ("One who does not have Triforce can't go in.", None, 'junk'), '1010': ("Save your future, end the Happy Mask Salesman.", None, 'junk'), '1012': ("I'm stoned. Get it?", None, 'junk'), '1013': ("Hoot! Hoot! Would you like me to repeat that?", None, 'junk'), '1014': ("Gorons are stupid. They eat rocks.", None, 'junk'), '1015': ("They say that Lon Lon Ranch prospered under Ingo.", None, 'junk'), '1016': ("The single rupee is a unique item.", None, 'junk'), '1017': ("Without the Lens of Truth, the Treasure Chest Mini-Game is a 1 out of 32 chance.^Good luck!", None, 'junk'), '1018': ("Use bombs wisely.", None, 'junk'), '1021': ("I found you, faker!", None, 'junk'), '1022': ("You're comparing yourself to me?^Ha! You're not even good enough to be my fake.", None, 'junk'), '1023': ("I'll make you eat those words.", None, 'junk'), '1024': ("What happened to Sheik?", None, 'junk'), '1025': ("L2P @.", None, 'junk'), '1026': ("I heard @ isn't very good at Zelda.", None, 'junk'), '1027': ("I'm Lonk from Pennsylvania.", None, 'junk'), '1028': ("I bet you'd like to have more bombs.", None, 'junk'), '1029': ("When all else fails, use Fire.", None, 'junk'), '1030': ("Here's a hint, @. Don't be bad.", None, 'junk'), '1031': ("Game Over. Return of Ganon.", None, 'junk'), '1032': ("May the way of the Hero lead to the Triforce.", None, 'junk'), '1033': ("Can't find an item? Scan an Amiibo.", None, 'junk'), '1034': ("They say this game has just a few glitches.", None, 'junk'), '1035': ("BRRING BRRING This is Ulrira. Wrong number?", None, 'junk'), '1036': ("Tingle Tingle Kooloo Limpah", None, 'junk'), '1037': ("L is real 2041", None, 'junk'), '1038': ("They say that Ganondorf will appear in the next Mario Tennis.", None, 'junk'), '1039': ("Medigoron sells the earliest Breath of the Wild demo.", None, 'junk'), '1040': ("There's a reason why I am special inquisitor!", None, 'junk'), '1041': ("You were almost a @ sandwich.", None, 'junk'), '1042': ("I'm a helpful hint Gossip Stone!^See, I'm helping.", None, 'junk'), '1043': ("Dear @, please come to the castle. 
I've baked a cake for you.&Yours truly, princess Zelda.", None, 'junk'), '1044': ("They say all toasters toast toast.", None, 'junk'), '1045': ("They say that Okami is the best Zelda game.", None, 'junk'), '1046': ("They say that quest guidance can be found at a talking rock.", None, 'junk'), '1047': ("They say that the final item you're looking for can be found somewhere in Hyrule.", None, 'junk'), '1048': ("Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.", None, 'junk'), '1049': ("They say that Barinade fears Deku Nuts.", None, 'junk'), '1050': ("They say that Flare Dancers do not fear Goron-crafted blades.", None, 'junk'), '1051': ("They say that Morpha is easily trapped in a corner.", None, 'junk'), '1052': ("They say that Bongo Bongo really hates the cold.", None, 'junk'), '1053': ("They say that crouch stabs mimic the effects of your last attack.", None, 'junk'), '1054': ("They say that bombing the hole Volvagia last flew into can be rewarding.", None, 'junk'), '1055': ("They say that invisible ghosts can be exposed with Deku Nuts.", None, 'junk'), '1056': ("They say that the real Phantom Ganon is bright and loud.", None, 'junk'), '1057': ("They say that walking backwards is very fast.", None, 'junk'), '1058': ("They say that leaping above the Castle Town entrance enriches most children.", None, 'junk'), 'Deku Tree': ("an ancient tree", "Deku Tree", 'dungeonName'), 'Dodongos Cavern': ("an immense cavern", "Dodongo's Cavern", 'dungeonName'), 'Jabu Jabus Belly': ("the belly of a deity", "Jabu Jabu's Belly", 'dungeonName'), 'Forest Temple': ("a deep forest", "Forest Temple", 'dungeonName'), 'Fire Temple': ("a high mountain", "Fire Temple", 'dungeonName'), 'Water Temple': ("a vast lake", "Water Temple", 'dungeonName'), 'Shadow Temple': ("the house of the dead", "Shadow Temple", 'dungeonName'), 'Spirit Temple': ("the goddess of the sand", "Spirit Temple", 'dungeonName'), 'Ice Cavern': ("a frozen maze", "Ice Cavern", 'dungeonName'), 'Bottom of the Well': ("a shadow\'s prison", "Bottom of the Well", 'dungeonName'), 'Gerudo Training Grounds': ("the test of thieves", "Gerudo Training Grounds", 'dungeonName'), 'Ganons Castle': ("a conquered citadel", "Ganon's Castle", 'dungeonName'), 'Queen Gohma': ("One inside an #ancient tree#...^", "One in the #Deku Tree#...^", 'boss'), 'King Dodongo': ("One within an #immense cavern#...^", "One in #Dodongo's Cavern#...^", 'boss'), 'Barinade': ("One in the #belly of a deity#...^", "One in #Jabu Jabu's Belly#...^", 'boss'), 'Phantom Ganon': ("One in a #deep forest#...^", "One in the #Forest Temple#...^", 'boss'), 'Volvagia': ("One on a #high mountain#...^", "One in the #Fire Temple#...^", 'boss'), 'Morpha': ("One under a #vast lake#...^", "One in the #Water Temple#...^", 'boss'), 'Bongo Bongo': ("One within the #house of the dead#...^", "One in the #Shadow Temple#...^", 'boss'), 'Twinrova': ("One inside a #goddess of the sand#...^", "One in the #Spirit Temple#...^", 'boss'), 'Links Pocket': ("One in #@'s pocket#...^", "One #@ already has#...^", 'boss'), 'Spiritual Stone Text Start': ("Ye who owns 3 Spiritual Stones...^", None, 'boss'), 'Spiritual Stone Text End': ("\x13\x08Stand with the Ocarina of Time&and play the Song of Time.", None, 'boss'), 'Medallion Text Start': ("When evil rules all, an awakening&voice from the Sacred Realm will&call those destined to be Sages,&who dwell in the \x05\x41five temples\x05\x40.^", None, 'boss'), 'Medallion Text End': ("\x13\x12Together with the Hero of Time,&the awakened ones will 
bind&the evil and return the light&of peace to the world.", None, 'boss'), 'Validation Line': ("Hmph... Since you made it this far,&I'll let you know what glorious&prize of Ganon's you likely&missed out on in my tower.^Behold...^", None, 'validation line'), 'Light Arrow Location': ("Ha ha ha... You'll never beat me by&reflecting my lightning bolts&and unleashing the arrows from&", None, 'Light Arrow Location'), '2001': ("Oh! It's @.&I was expecting someone called&Sheik. Do you know what&happened to them?", None, 'ganonLine'), '2002': ("I knew I shouldn't have put the key&on the other side of my door.", None, 'ganonLine'), '2003': ("Looks like it's time for a&round of tennis.", None, 'ganonLine'), '2004': ("You'll never deflect my bolts of&energy with your sword,&then shoot me with those Light&Arrows you happen to have.", None, 'ganonLine'), '2005': ("Why did I leave my trident&back in the desert?", None, 'ganonLine'), '2006': ("Zelda is probably going to do&something stupid, like send you&back to your own timeline.^So this is quite meaningless.&Do you really want&to save this moron?", None, 'ganonLine'), '2007': ("What about Zelda makes you think&she'd be a better ruler than I?^I saved Lon Lon Ranch,&fed the hungry,&and my castle floats.", None, 'ganonLine'), '2008': ("I've learned this spell,&it's really neat,&I'll keep it later&for your treat!", None, 'ganonLine'), '2009': ("Many tricks are up my sleeve,&to save yourself&you'd better leave!", None, 'ganonLine'), '2010': ("After what you did to&Koholint Island, how can&you call me the bad guy?", None, 'ganonLine'), '2011': ("Today, let's begin down&'The Hero is Defeated' timeline.", None, 'ganonLine'), } # This specifies which hints will never appear due to either having known or known useless contents or due to the locations not existing. def hintExclusions(world, clear_cache=False): if not clear_cache and hintExclusions.exclusions is not None: return hintExclusions.exclusions hintExclusions.exclusions = [] hintExclusions.exclusions.extend(world.disabled_locations) for location in world.get_locations(): if location.locked: hintExclusions.exclusions.append(location.name) world_location_names = [ location.name for location in world.get_locations()] location_hints = [] for name in hintTable: hint = getHint(name, world.clearer_hints) if any(item in hint.type for item in ['always', 'sometimes', 'minigame', 'overworld', 'dungeon', 'song']): location_hints.append(hint) for hint in location_hints: if hint.name not in world_location_names and hint.name not in hintExclusions.exclusions: hintExclusions.exclusions.append(hint.name) return hintExclusions.exclusions hintExclusions.exclusions = None
107.496957
245
0.466054
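A minimal illustration of the lookup helpers above, assuming the module is importable as-is; no world object is required for getHint itself.

hint = getHint('Boomerang', clearer_hint=True)
print(hint.text)    # 'the Boomerang' (the unambiguous wording)
print(hint.type)    # ['item']

vague_hint = getHint('Boomerang')   # clearer_hint defaults to False
print(vague_hint.text)              # randomly 'a banana' or 'a stun stick'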
acede6a18ae32d8ee51210e8fb1d399c463e33e5
10,063
py
Python
onmt/Loss.py
Zhao-Yuting/MNMT-with-semantic-regions
0376763e421ae96af882b68a522a50fc968123c5
[ "MIT" ]
5
2020-11-23T07:29:52.000Z
2022-02-21T04:57:37.000Z
onmt/Loss.py
Zhao-Yuting/MNMT-with-semantic-regions
0376763e421ae96af882b68a522a50fc968123c5
[ "MIT" ]
null
null
null
onmt/Loss.py
Zhao-Yuting/MNMT-with-semantic-regions
0376763e421ae96af882b68a522a50fc968123c5
[ "MIT" ]
null
null
null
""" This file handles the details of the loss function during training. This includes: LossComputeBase and the standard NMTLossCompute, and sharded loss compute stuff. """ from __future__ import division import torch import torch.nn as nn from torch.autograd import Variable import onmt import onmt.io class LossComputeBase(nn.Module): """ Class for managing efficient loss computation. Handles sharding next step predictions and accumulating mutiple loss computations Users can implement their own loss computation strategy by making subclass of this one. Users need to implement the _compute_loss() and make_shard_state() methods. Args: generator (:obj:`nn.Module`) : module that maps the output of the decoder to a distribution over the target vocabulary. tgt_vocab (:obj:`Vocab`) : torchtext vocab object representing the target output normalzation (str): normalize by "sents" or "tokens" """ def __init__(self, generator, tgt_vocab): super(LossComputeBase, self).__init__() self.generator = generator self.tgt_vocab = tgt_vocab self.padding_idx = tgt_vocab.stoi[onmt.io.PAD_WORD] def _make_shard_state(self, batch, output, range_, attns=None): """ Make shard state dictionary for shards() to return iterable shards for efficient loss computation. Subclass must define this method to match its own _compute_loss() interface. Args: batch: the current batch. output: the predict output from the model. range_: the range of examples for computing, the whole batch or a trunc of it? attns: the attns dictionary returned from the model. """ return NotImplementedError def _compute_loss(self, batch, output, target, **kwargs): """ Compute the loss. Subclass must define this method. Args: batch: the current batch. output: the predict output from the model. target: the validate target to compare output with. **kwargs(optional): additional info for computing loss. """ return NotImplementedError def monolithic_compute_loss(self, batch, output, attns): """ Compute the forward loss for the batch. Args: batch (batch): batch of labeled examples output (:obj:`FloatTensor`): output of decoder model `[tgt_len x batch x hidden]` attns (dict of :obj:`FloatTensor`) : dictionary of attention distributions `[tgt_len x batch x src_len]` Returns: :obj:`onmt.Statistics`: loss statistics """ range_ = (0, batch.tgt.size(0)) shard_state = self._make_shard_state(batch, output, range_, attns) _, batch_stats = self._compute_loss(batch, **shard_state) return batch_stats def sharded_compute_loss(self, batch, output, attns, cur_trunc, trunc_size, shard_size, normalization): """Compute the forward loss and backpropagate. Computation is done with shards and optionally truncation for memory efficiency. Also supports truncated BPTT for long sequences by taking a range in the decoder output sequence to back propagate in. Range is from `(cur_trunc, cur_trunc + trunc_size)`. Note harding is an exact efficiency trick to relieve memory required for the generation buffers. Truncation is an approximate efficiency trick to relieve the memory required in the RNN buffers. 
Args: batch (batch) : batch of labeled examples output (:obj:`FloatTensor`) : output of decoder model `[tgt_len x batch x hidden]` attns (dict) : dictionary of attention distributions `[tgt_len x batch x src_len]` cur_trunc (int) : starting position of truncation window trunc_size (int) : length of truncation window shard_size (int) : maximum number of examples in a shard Returns: :obj:`onmt.Statistics`: validation loss statistics """ batch_stats = onmt.Statistics() range_ = (cur_trunc, cur_trunc + trunc_size) shard_state = self._make_shard_state(batch, output, range_, attns) for shard in shards(shard_state, shard_size): loss, stats = self._compute_loss(batch, **shard) loss.div(normalization).backward(retain_graph=True) batch_stats.update(stats) return batch_stats def _stats(self, loss, scores, target): """ Args: loss (:obj:`FloatTensor`): the loss computed by the loss criterion. scores (:obj:`FloatTensor`): a score for each possible output target (:obj:`FloatTensor`): true targets Returns: :obj:`Statistics` : statistics for this batch. """ pred = scores.max(1)[1] non_padding = target.ne(self.padding_idx) num_correct = pred.eq(target) \ .masked_select(non_padding) \ .sum().item() num_non_padding = non_padding.sum().item() return onmt.Statistics(loss.item(), num_non_padding, num_correct) def _bottle(self, v): return v.view(-1, v.size(2)) def _unbottle(self, v, batch_size): return v.view(-1, batch_size, v.size(1)) class NMTLossCompute(LossComputeBase): """ Standard NMT Loss Computation. """ def __init__(self, generator, tgt_vocab, normalization="sents", label_smoothing=0.0): super(NMTLossCompute, self).__init__(generator, tgt_vocab) assert (label_smoothing >= 0.0 and label_smoothing <= 1.0) if label_smoothing > 0: # When label smoothing is turned on, # KL-divergence between q_{smoothed ground truth prob.}(w) # and p_{prob. computed by model}(w) is minimized. # If label smoothing value is set to zero, the loss # is equivalent to NLLLoss or CrossEntropyLoss. # All non-true labels are uniformly set to low-confidence. 
self.criterion = nn.KLDivLoss(size_average=False) one_hot = torch.randn(1, len(tgt_vocab)) one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2)) one_hot[0][self.padding_idx] = 0 self.register_buffer('one_hot', one_hot) else: weight = torch.ones(len(tgt_vocab)) weight[self.padding_idx] = 0 self.criterion = nn.NLLLoss(weight, size_average=False) self.confidence = 1.0 - label_smoothing def _make_shard_state(self, batch, output, range_, attns=None): return { "output": output, "target": batch.tgt[range_[0] + 1: range_[1]], } def _compute_loss(self, batch, output, target): scores = self.generator(self._bottle(output)) gtruth = target.view(-1) if self.confidence < 1: tdata = gtruth.data mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze() likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1)) tmp_ = self.one_hot.repeat(gtruth.size(0), 1) tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence) if mask.dim() > 0: likelihood.index_fill_(0, mask, 0) tmp_.index_fill_(0, mask, 0) gtruth = Variable(tmp_, requires_grad=False) loss = self.criterion(scores, gtruth) if self.confidence < 1: loss_data = - likelihood.sum(0) else: loss_data = loss.data.clone() stats = self._stats(loss_data, scores.data, target.view(-1).data) return loss, stats def filter_shard_state(state): for k, v in state.items(): if v is not None: if isinstance(v, Variable) and v.requires_grad: v = Variable(v.data, requires_grad=True, volatile=False) yield k, v def shards(state, shard_size, eval=False): """ Args: state: A dictionary which corresponds to the output of *LossCompute._make_shard_state(). The values for those keys are Tensor-like or None. shard_size: The maximum size of the shards yielded by the model. eval: If True, only yield the state, nothing else. Otherwise, yield shards. Yields: Each yielded shard is a dict. Side effect: After the last shard, this function does back-propagation. """ if eval: yield state else: # non_none: the subdict of the state dictionary where the values # are not None. non_none = dict(filter_shard_state(state)) # Now, the iteration: # state is a dictionary of sequences of tensor-like but we # want a sequence of dictionaries of tensors. # First, unzip the dictionary into a sequence of keys and a # sequence of tensor-like sequences. keys, values = zip(*((k, torch.split(v, shard_size)) for k, v in non_none.items())) # Now, yield a dictionary for each shard. The keys are always # the same. values is a sequence of length #keys where each # element is a sequence of length #shards. We want to iterate # over the shards, not over the keys: therefore, the values need # to be re-zipped by shard and then each shard can be paired # with the keys. for shard_tensors in zip(*values): yield dict(zip(keys, shard_tensors)) # Assumed backprop'd variables = ((state[k], v.grad.data) for k, v in non_none.items() if isinstance(v, Variable) and v.grad is not None) inputs, grads = zip(*variables) torch.autograd.backward(inputs, grads)
37.830827
79
0.616913
acede6ba7039a49570e155f2f61b696d5910042d
737
py
Python
aiakos/openid_provider/tokens/code_sql.py
aiakos/aiakos
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
[ "BSD-2-Clause", "MIT" ]
4
2017-04-28T19:09:17.000Z
2018-07-03T04:43:54.000Z
aiakos/openid_provider/tokens/code_sql.py
aiakos/aiakos
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
[ "BSD-2-Clause", "MIT" ]
2
2020-06-05T17:46:47.000Z
2021-06-10T17:22:58.000Z
aiakos/openid_provider/tokens/code_sql.py
aiakos/aiakos
a591e7ef13ab9e8e14b4d3569d43fce694c4150a
[ "BSD-2-Clause", "MIT" ]
2
2017-08-14T07:15:14.000Z
2019-03-04T14:02:05.000Z
import os from datetime import timedelta from django.core.exceptions import ValidationError from django.db import models from django.utils import timezone from ._sql import SQLBaseModel class Code(SQLBaseModel): nonce = models.CharField(max_length=255, blank=True, default='') def makeCode(client, user, scope, nonce=''): expires_in = int(os.environ.get('EXPIRE_CODE', 60)) code = Code.objects.create(client=client, user=user, scope=scope, nonce=nonce, expires_at=timezone.now() + timedelta(seconds=expires_in)) return str(code.id) def expandCode(code): try: code = Code.objects.get(id=code) code.delete() # TODO use atomic get-and-delete return code except (Code.DoesNotExist, ValidationError): raise ValueError()
26.321429
138
0.762551
acede6e4896842bf54b807c3ab5be18558ec211e
1,567
py
Python
REQreate/spatial_distribution_class.py
michellqueiroz-ua/instance-generator
8b431a64898bcf1006464a8394824ab57576811e
[ "MIT" ]
1
2022-02-09T07:33:07.000Z
2022-02-09T07:33:07.000Z
REQreate/spatial_distribution_class.py
michellqueiroz-ua/instance-generator
8b431a64898bcf1006464a8394824ab57576811e
[ "MIT" ]
null
null
null
REQreate/spatial_distribution_class.py
michellqueiroz-ua/instance-generator
8b431a64898bcf1006464a8394824ab57576811e
[ "MIT" ]
null
null
null
import math import numpy as np import random class SpatialDistribution: def __init__(self, num_origins, num_destinations, prob, origin_zones=[], destination_zones=[], is_random_origin_zones=False, is_random_destination_zones=False): self.num_origins = int(num_origins) self.num_destinations = int(num_destinations) self.origin_zones = origin_zones self.destination_zones = destination_zones self.is_random_origin_zones = is_random_origin_zones self.is_random_destination_zones = is_random_destination_zones self.prob = prob def set_number_origins(self, num_origins): self.num_origins = int(num_origins) def set_number_destinations(self, num_destinations): self.num_destinations = int(num_destinations) ''' def add_origin_zone(self, zone_id): self.is_random_origin_zones = False self.origin_zones.append(int(zone_id)) def add_destination_zone(self, zone_id): self.is_random_destination_zones = False self.destination_zones.append(int(zone_id)) ''' def randomly_sample_origin_zones(self, num_zones): self.origin_zones = [] if self.num_origins != -1: self.origin_zones = np.random.randint(0, num_zones, self.num_origins) def randomly_sample_destination_zones(self, num_zones): self.destination_zones = [] if self.num_destinations != -1: self.destination_zones = np.random.randint(0, num_zones, self.num_destinations)
27.017241
164
0.694959
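A short usage sketch of the class above; only numpy is assumed, as in the module itself.

dist = SpatialDistribution(num_origins=3, num_destinations=2, prob=0.5)
dist.randomly_sample_origin_zones(num_zones=10)       # draws 3 origin zone ids from [0, 10)
dist.randomly_sample_destination_zones(num_zones=10)  # draws 2 destination zone ids from [0, 10)
print(dist.origin_zones, dist.destination_zones)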
acede7fab2f8046079dda5b4588d5df0b80cba4b
2,571
py
Python
tools/c7n_salactus/c7n_salactus/rqworker.py
kaskrish/cloud-custodian
97fd1048d866657c0e85816eaeff55018c336fe8
[ "Apache-2.0" ]
1
2017-06-26T18:30:22.000Z
2017-06-26T18:30:22.000Z
tools/c7n_salactus/c7n_salactus/rqworker.py
kaskrish/cloud-custodian
97fd1048d866657c0e85816eaeff55018c336fe8
[ "Apache-2.0" ]
1
2021-02-24T04:42:37.000Z
2021-02-24T04:42:37.000Z
tools/c7n_salactus/c7n_salactus/rqworker.py
kaskrish/cloud-custodian
97fd1048d866657c0e85816eaeff55018c336fe8
[ "Apache-2.0" ]
1
2021-01-26T10:03:21.000Z
2021-01-26T10:03:21.000Z
# Copyright 2017 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ rq worker customizations - dont fork per job - use compressed msg pack messages (less mem in queue) """ import datetime import msgpack import cPickle import logging from lz4.frame import compress, decompress from rq.worker import Worker from rq import job PackDate_ExtType = 42 PackObj_ExtType = 43 job_default_load = job.loads log = logging.getLogger('salactus') handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) log.addHandler(handler) log.setLevel(logging.INFO) def decode_ext(code, data): if code == PackDate_ExtType: values = msgpack.unpackb(data) return datetime.datetime(*values) elif code == PackObj_ExtType: return cPickle.loads(data) return msgpack.ExtType(code, data) def encode_ext(obj): if isinstance(obj, datetime.datetime): components = (obj.year, obj.month, obj.day, obj.hour, obj.minute, obj.second, obj.microsecond) data = msgpack.ExtType(PackDate_ExtType, msgpack.packb(components)) return data return msgpack.ExtType( PackObj_ExtType, cPickle.dumps(obj, protocol=cPickle.HIGHEST_PROTOCOL)) def dumps(o): return compress( msgpack.packb(o, default=encode_ext, use_bin_type=True)) def loads(s): try: return msgpack.unpackb( decompress(s), ext_hook=decode_ext, encoding='utf-8') except Exception: # we queue work occassionally from lambdas or other systems not using # the worker class return job_default_load(s) job.dumps = dumps job.loads = loads class SalactusWorker(Worker): """Get rid of process boundary, maintain worker status. We rely on supervisord for process supervision, and we want to be able to cache sts sessions per process to avoid role assume storms. """ def execute_job(self, job, queue): self.set_state('busy') self.perform_job(job, queue) self.set_state('idle')
28.566667
77
0.712952
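A rough round-trip sketch for the compressed msgpack job serializers defined above. It assumes the module's own dependencies are available (msgpack, lz4, rq and, because of cPickle, Python 2); the payload values are made up for illustration.

import datetime

payload = {'bucket': 'example-bucket', 'scanned': datetime.datetime(2017, 1, 1, 12, 30)}
blob = dumps(payload)        # msgpack with the custom ext types, then LZ4 compression
restored = loads(blob)       # the datetime is rebuilt by decode_ext from its components
print(restored['scanned'])   # 2017-01-01 12:30:00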
acedea84a74c21f6d9317d722772bc5cfd5e9bd7
70
py
Python
build/lib/beepbot/__init__.py
makebot-lab/beepbot
6082f73e880eaa5651e10f9cf793cda1b2a26ebc
[ "MIT" ]
null
null
null
build/lib/beepbot/__init__.py
makebot-lab/beepbot
6082f73e880eaa5651e10f9cf793cda1b2a26ebc
[ "MIT" ]
null
null
null
build/lib/beepbot/__init__.py
makebot-lab/beepbot
6082f73e880eaa5651e10f9cf793cda1b2a26ebc
[ "MIT" ]
null
null
null
__all__ = ["move", "oled", "lights", "utils"] __version__ = "1.0.3"
23.333333
46
0.571429
acedeb6a8fe4effdefe1bf146714b35eeeea19ae
2,093
py
Python
sdnotify/__init__.py
dlech/sdnotify
d349a649d961b02df4755efa2e21807940227c4d
[ "MIT" ]
8
2020-11-13T08:48:01.000Z
2021-12-16T06:30:27.000Z
sdnotify/__init__.py
dlech/sdnotify
d349a649d961b02df4755efa2e21807940227c4d
[ "MIT" ]
4
2022-01-05T09:16:30.000Z
2022-03-29T09:32:44.000Z
sdnotify/__init__.py
dlech/sdnotify
d349a649d961b02df4755efa2e21807940227c4d
[ "MIT" ]
1
2022-01-05T08:56:59.000Z
2022-01-05T08:56:59.000Z
import socket import os import sys __version__ = "0.3.2" # Byte conversion utility for compatibility between # Python 2 and 3. # http://python3porting.com/problems.html#nicer-solutions if sys.version_info < (3,): def _b(x): return x else: import codecs def _b(x): return codecs.latin_1_encode(x)[0] class SystemdNotifier: """This class holds a connection to the systemd notification socket and can be used to send messages to systemd using its notify method.""" def __init__(self, debug=False): """Instantiate a new notifier object. This will initiate a connection to the systemd notification socket. Normally this method silently ignores exceptions (for example, if the systemd notification socket is not available) to allow applications to function on non-systemd based systems. However, setting debug=True will cause this method to raise any exceptions generated to the caller, to aid in debugging. """ self.debug = debug try: self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) addr = os.getenv('NOTIFY_SOCKET') if addr[0] == '@': addr = '\0' + addr[1:] self.socket.connect(addr) except: self.socket = None if self.debug: raise def notify(self, state): """Send a notification to systemd. state is a string; see the man page of sd_notify (http://www.freedesktop.org/software/systemd/man/sd_notify.html) for a description of the allowable values. Normally this method silently ignores exceptions (for example, if the systemd notification socket is not available) to allow applications to function on non-systemd based systems. However, setting debug=True will cause this method to raise any exceptions generated to the caller, to aid in debugging.""" try: self.socket.sendall(_b(state)) except: if self.debug: raise
34.883333
98
0.644052
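Typical use of the class above inside a daemon started as a systemd service of Type=notify; when the process is not started by systemd (no NOTIFY_SOCKET in the environment) the calls are silently ignored, exactly as the docstrings describe.

n = SystemdNotifier()
# ... perform start-up work here ...
n.notify("READY=1")                   # tell systemd the service finished initialising
n.notify("STATUS=Waiting for jobs")   # optional human-readable status
n.notify("WATCHDOG=1")                # keep-alive ping if WatchdogSec= is configured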
acedeb741a02f2e2a62459351df60297748edaf9
18,742
py
Python
easymd/models/backbones/load_checkpoint.py
zhiqi-li/Panoptic-SegFormer
cdb9b68059e9ef825a3f7079c37aa835b1711227
[ "Apache-2.0" ]
97
2021-11-20T17:09:24.000Z
2022-03-31T03:03:38.000Z
easymd/models/backbones/load_checkpoint.py
zhiqi-li/Panoptic-SegFormer
cdb9b68059e9ef825a3f7079c37aa835b1711227
[ "Apache-2.0" ]
3
2021-12-04T10:01:04.000Z
2022-02-25T06:18:57.000Z
easymd/models/backbones/load_checkpoint.py
zhiqi-li/Panoptic-SegFormer
cdb9b68059e9ef825a3f7079c37aa835b1711227
[ "Apache-2.0" ]
9
2021-12-03T08:39:04.000Z
2022-03-27T14:09:04.000Z
# Copyright (c) Open-MMLab. All rights reserved. import io import os import os.path as osp import pkgutil import time import warnings from collections import OrderedDict from importlib import import_module from tempfile import TemporaryDirectory import torch import torchvision from torch.optim import Optimizer from torch.utils import model_zoo from torch.nn import functional as F import mmcv from mmcv.fileio import FileClient from mmcv.fileio import load as load_file from mmcv.parallel import is_module_wrapper from mmcv.utils import mkdir_or_exist from mmcv.runner import get_dist_info ENV_MMCV_HOME = 'MMCV_HOME' ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' DEFAULT_CACHE_DIR = '~/.cache' def _get_mmcv_home(): mmcv_home = os.path.expanduser( os.getenv( ENV_MMCV_HOME, os.path.join( os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) mkdir_or_exist(mmcv_home) return mmcv_home def load_state_dict(module, state_dict, strict=False, logger=None): """Load state_dict to a module. This method is modified from :meth:`torch.nn.Module.load_state_dict`. Default value for ``strict`` is set to ``False`` and the message for param mismatch will be shown even if strict is False. Args: module (Module): Module that receives the state_dict. state_dict (OrderedDict): Weights. strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. logger (:obj:`logging.Logger`, optional): Logger to log the error message. If not specified, print function will be used. """ unexpected_keys = [] all_missing_keys = [] err_msg = [] metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata # use _load_from_state_dict to enable checkpoint version control def load(module, prefix=''): # recursively check parallel module in case that the model has a # complicated structure, e.g., nn.Module(nn.Module(DDP)) if is_module_wrapper(module): module = module.module local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict(state_dict, prefix, local_metadata, True, all_missing_keys, unexpected_keys, err_msg) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(module) load = None # break load->load reference cycle # ignore "num_batches_tracked" of BN layers missing_keys = [ key for key in all_missing_keys if 'num_batches_tracked' not in key ] if unexpected_keys: err_msg.append('unexpected key in source ' f'state_dict: {", ".join(unexpected_keys)}\n') if missing_keys: err_msg.append( f'missing keys in source state_dict: {", ".join(missing_keys)}\n') rank, _ = get_dist_info() if len(err_msg) > 0 and rank == 0: err_msg.insert( 0, 'The model and loaded state dict do not match exactly\n') err_msg = '\n'.join(err_msg) if strict: raise RuntimeError(err_msg) elif logger is not None: logger.warning(err_msg) else: print(err_msg) def load_url_dist(url, model_dir=None): """In distributed setting, this function only download checkpoint at local rank 0.""" rank, world_size = get_dist_info() rank = int(os.environ.get('LOCAL_RANK', rank)) if rank == 0: checkpoint = model_zoo.load_url(url, model_dir=model_dir) if world_size > 1: torch.distributed.barrier() if rank > 0: checkpoint = model_zoo.load_url(url, model_dir=model_dir) return checkpoint def load_pavimodel_dist(model_path, map_location=None): """In distributed setting, this function only download checkpoint 
at local rank 0.""" try: from pavi import modelcloud except ImportError: raise ImportError( 'Please install pavi to load checkpoint from modelcloud.') rank, world_size = get_dist_info() rank = int(os.environ.get('LOCAL_RANK', rank)) if rank == 0: model = modelcloud.get(model_path) with TemporaryDirectory() as tmp_dir: downloaded_file = osp.join(tmp_dir, model.name) model.download(downloaded_file) checkpoint = torch.load(downloaded_file, map_location=map_location) if world_size > 1: torch.distributed.barrier() if rank > 0: model = modelcloud.get(model_path) with TemporaryDirectory() as tmp_dir: downloaded_file = osp.join(tmp_dir, model.name) model.download(downloaded_file) checkpoint = torch.load( downloaded_file, map_location=map_location) return checkpoint def load_fileclient_dist(filename, backend, map_location): """In distributed setting, this function only download checkpoint at local rank 0.""" rank, world_size = get_dist_info() rank = int(os.environ.get('LOCAL_RANK', rank)) allowed_backends = ['ceph'] if backend not in allowed_backends: raise ValueError(f'Load from Backend {backend} is not supported.') if rank == 0: fileclient = FileClient(backend=backend) buffer = io.BytesIO(fileclient.get(filename)) checkpoint = torch.load(buffer, map_location=map_location) if world_size > 1: torch.distributed.barrier() if rank > 0: fileclient = FileClient(backend=backend) buffer = io.BytesIO(fileclient.get(filename)) checkpoint = torch.load(buffer, map_location=map_location) return checkpoint def get_torchvision_models(): model_urls = dict() for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): if ispkg: continue _zoo = import_module(f'torchvision.models.{name}') if hasattr(_zoo, 'model_urls'): _urls = getattr(_zoo, 'model_urls') model_urls.update(_urls) return model_urls def get_external_models(): mmcv_home = _get_mmcv_home() default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') default_urls = load_file(default_json_path) assert isinstance(default_urls, dict) external_json_path = osp.join(mmcv_home, 'open_mmlab.json') if osp.exists(external_json_path): external_urls = load_file(external_json_path) assert isinstance(external_urls, dict) default_urls.update(external_urls) return default_urls def get_mmcls_models(): mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') mmcls_urls = load_file(mmcls_json_path) return mmcls_urls def get_deprecated_model_names(): deprecate_json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json') deprecate_urls = load_file(deprecate_json_path) assert isinstance(deprecate_urls, dict) return deprecate_urls def _process_mmcls_checkpoint(checkpoint): state_dict = checkpoint['state_dict'] new_state_dict = OrderedDict() for k, v in state_dict.items(): if k.startswith('backbone.'): new_state_dict[k[9:]] = v new_checkpoint = dict(state_dict=new_state_dict) return new_checkpoint def _load_checkpoint(filename, map_location=None): """Load checkpoint from somewhere (modelzoo, file, url). Args: filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str | None): Same as :func:`torch.load`. Default: None. Returns: dict | OrderedDict: The loaded checkpoint. It can be either an OrderedDict storing model weights or a dict containing other information, which depends on the checkpoint. 
""" if filename.startswith('modelzoo://'): warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' 'use "torchvision://" instead') model_urls = get_torchvision_models() model_name = filename[11:] checkpoint = load_url_dist(model_urls[model_name]) elif filename.startswith('torchvision://'): model_urls = get_torchvision_models() model_name = filename[14:] checkpoint = load_url_dist(model_urls[model_name]) elif filename.startswith('open-mmlab://'): model_urls = get_external_models() model_name = filename[13:] deprecated_urls = get_deprecated_model_names() if model_name in deprecated_urls: warnings.warn(f'open-mmlab://{model_name} is deprecated in favor ' f'of open-mmlab://{deprecated_urls[model_name]}') model_name = deprecated_urls[model_name] model_url = model_urls[model_name] # check if is url if model_url.startswith(('http://', 'https://')): checkpoint = load_url_dist(model_url) else: filename = osp.join(_get_mmcv_home(), model_url) if not osp.isfile(filename): raise IOError(f'{filename} is not a checkpoint file') checkpoint = torch.load(filename, map_location=map_location) elif filename.startswith('mmcls://'): model_urls = get_mmcls_models() model_name = filename[8:] checkpoint = load_url_dist(model_urls[model_name]) checkpoint = _process_mmcls_checkpoint(checkpoint) elif filename.startswith(('http://', 'https://')): checkpoint = load_url_dist(filename) elif filename.startswith('pavi://'): model_path = filename[7:] checkpoint = load_pavimodel_dist(model_path, map_location=map_location) elif filename.startswith('s3://'): checkpoint = load_fileclient_dist( filename, backend='ceph', map_location=map_location) else: if not osp.isfile(filename): raise IOError(f'{filename} is not a checkpoint file') checkpoint = torch.load(filename, map_location=map_location) return checkpoint def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None): """Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. Returns: dict or OrderedDict: The loaded checkpoint. 
""" checkpoint = _load_checkpoint(filename, map_location) # OrderedDict is a subclass of dict if not isinstance(checkpoint, dict): raise RuntimeError( f'No state_dict found in checkpoint file {filename}') # get state_dict from checkpoint if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] elif 'model' in checkpoint: state_dict = checkpoint['model'] else: state_dict = checkpoint # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} # reshape absolute position embedding if state_dict.get('absolute_pos_embed') is not None: absolute_pos_embed = state_dict['absolute_pos_embed'] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = model.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H*W: logger.warning("Error in loading absolute_pos_embed, pass") else: state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) # interpolate position bias table if needed relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] table_current = model.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f"Error in loading {table_key}, pass") else: if L1 != L2: S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) # load state_dict load_state_dict(model, state_dict, strict, logger) return checkpoint def weights_to_cpu(state_dict): """Copy a model state_dict to cpu. Args: state_dict (OrderedDict): Model weights on GPU. Returns: OrderedDict: Model weights on GPU. """ state_dict_cpu = OrderedDict() for key, val in state_dict.items(): state_dict_cpu[key] = val.cpu() return state_dict_cpu def _save_to_state_dict(module, destination, prefix, keep_vars): """Saves module state to `destination` dictionary. This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. Args: module (nn.Module): The module to generate state_dict. destination (dict): A dict where state will be stored. prefix (str): The prefix for parameters and buffers used in this module. """ for name, param in module._parameters.items(): if param is not None: destination[prefix + name] = param if keep_vars else param.detach() for name, buf in module._buffers.items(): # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d if buf is not None: destination[prefix + name] = buf if keep_vars else buf.detach() def get_state_dict(module, destination=None, prefix='', keep_vars=False): """Returns a dictionary containing a whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. This method is modified from :meth:`torch.nn.Module.state_dict` to recursively check parallel module in case that the model has a complicated structure, e.g., nn.Module(nn.Module(DDP)). Args: module (nn.Module): The module to generate state_dict. destination (OrderedDict): Returned dict for the state of the module. prefix (str): Prefix of the key. keep_vars (bool): Whether to keep the variable property of the parameters. Default: False. Returns: dict: A dictionary containing a whole state of the module. 
""" # recursively check parallel module in case that the model has a # complicated structure, e.g., nn.Module(nn.Module(DDP)) if is_module_wrapper(module): module = module.module # below is the same as torch.nn.Module.state_dict() if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() destination._metadata[prefix[:-1]] = local_metadata = dict( version=module._version) _save_to_state_dict(module, destination, prefix, keep_vars) for name, child in module._modules.items(): if child is not None: get_state_dict( child, destination, prefix + name + '.', keep_vars=keep_vars) for hook in module._state_dict_hooks.values(): hook_result = hook(module, destination, prefix, local_metadata) if hook_result is not None: destination = hook_result return destination def save_checkpoint(model, filename, optimizer=None, meta=None): """Save checkpoint to file. The checkpoint will have 3 fields: ``meta``, ``state_dict`` and ``optimizer``. By default ``meta`` will contain version and time info. Args: model (Module): Module whose params are to be saved. filename (str): Checkpoint filename. optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. meta (dict, optional): Metadata to be saved in checkpoint. """ if meta is None: meta = {} elif not isinstance(meta, dict): raise TypeError(f'meta must be a dict or None, but got {type(meta)}') meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) if is_module_wrapper(model): model = model.module if hasattr(model, 'CLASSES') and model.CLASSES is not None: # save class name to the meta meta.update(CLASSES=model.CLASSES) checkpoint = { 'meta': meta, 'state_dict': weights_to_cpu(get_state_dict(model)) } # save optimizer state dict in the checkpoint if isinstance(optimizer, Optimizer): checkpoint['optimizer'] = optimizer.state_dict() elif isinstance(optimizer, dict): checkpoint['optimizer'] = {} for name, optim in optimizer.items(): checkpoint['optimizer'][name] = optim.state_dict() if filename.startswith('pavi://'): try: from pavi import modelcloud from pavi.exception import NodeNotFoundError except ImportError: raise ImportError( 'Please install pavi to load checkpoint from modelcloud.') model_path = filename[7:] root = modelcloud.Folder() model_dir, model_name = osp.split(model_path) try: model = modelcloud.get(model_dir) except NodeNotFoundError: model = root.create_training_model(model_dir) with TemporaryDirectory() as tmp_dir: checkpoint_file = osp.join(tmp_dir, model_name) with open(checkpoint_file, 'wb') as f: torch.save(checkpoint, f) f.flush() model.create_file(checkpoint_file, name=model_name) else: mmcv.mkdir_or_exist(osp.dirname(filename)) # immediately flush buffer with open(filename, 'wb') as f: torch.save(checkpoint, f) f.flush()
37.78629
109
0.64881
acedec5a56cb617166e25b6dc6b52d63cfa7a204
80
py
Python
jupyterlab/_version.py
QUANTAXIS/jupyterlab
afedbf9e6f00d12723b142a3e87729e9ed60b994
[ "BSD-3-Clause" ]
null
null
null
jupyterlab/_version.py
QUANTAXIS/jupyterlab
afedbf9e6f00d12723b142a3e87729e9ed60b994
[ "BSD-3-Clause" ]
null
null
null
jupyterlab/_version.py
QUANTAXIS/jupyterlab
afedbf9e6f00d12723b142a3e87729e9ed60b994
[ "BSD-3-Clause" ]
1
2019-08-11T04:11:15.000Z
2019-08-11T04:11:15.000Z
version_info = (0, 33, 0, 'rc1')
__version__ = ".".join(map(str, version_info))
26.666667
46
0.65
acedecbd6eb577bac8e5413cf0b58b767fc907bf
4,076
py
Python
scheduler_app/task/person.py
mantoshkumar1/interview_scheduler
1b29d349770a85620f2ee773b52e6ecc39343dc3
[ "MIT" ]
1
2018-06-25T00:06:46.000Z
2018-06-25T00:06:46.000Z
scheduler_app/task/person.py
mantoshkumar1/interview_scheduler
1b29d349770a85620f2ee773b52e6ecc39343dc3
[ "MIT" ]
null
null
null
scheduler_app/task/person.py
mantoshkumar1/interview_scheduler
1b29d349770a85620f2ee773b52e6ecc39343dc3
[ "MIT" ]
null
null
null
from flask import request, abort from flask_api import status import time, datetime, sqlite3 from werkzeug.exceptions import BadRequest from sqlalchemy import exc from setting import db class Person: @staticmethod def verify_rpc_value ( user_dict ): for key in user_dict: if type ( user_dict[ key ] ) == unicode or type ( user_dict[ key ] ) == str: continue else: raise ValueError ( 'Value is not String' ) @classmethod def verify_post_data ( cls ): """ Verify POST RPC data. If data is not supposed format, it raises appropriate error. :return: """ valid_input_format = {"name": "Test", "email": "test@test.com", \ "day": "Monday", "start_time": "16:30", "end_time": "17:30" } warning_msg = 'Please provide input in following json format: ' + str ( valid_input_format ) # check every field is present and end_time is greater than start_time try: request.json[ 'name' ] # verify email format is correct # I assume user will provide only valid email address request.json[ 'email' ] # verify entry is Mon-Friday only if request.json[ 'day' ] not in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'): return status.HTTP_400_BAD_REQUEST, {"Error: ": "Day format is incorrect", 'Fix': warning_msg} # verifying whether time is in 24 hours format only otherwise raises ValueError time.strptime ( request.json[ 'start_time' ], '%H:%M' ) time.strptime ( request.json[ 'end_time' ], '%H:%M' ) # verifying end_time is greater than start_time time_a = datetime.datetime.strptime ( request.json[ 'start_time' ], "%H:%M" ) time_b = datetime.datetime.strptime ( request.json[ 'end_time' ], "%H:%M" ) if time_b <= time_a: return status.HTTP_400_BAD_REQUEST, {"Error: ": "end_time is less/equal than start_time", 'Fix': warning_msg} # return False, {"Error: ": "end_time is less/equal than start_time", 'Fix': warning_msg} cls.verify_rpc_value ( request.json ) except KeyError: # All the values are not present (400 Bad Request) return status.HTTP_400_BAD_REQUEST, {"Error": "All mandatory fields are not provided", 'Fix': warning_msg} except ValueError: # time format of start_time and end_time is not in 24 hours format return status.HTTP_400_BAD_REQUEST, { "Error": "Time format is/are not in 24 hours format or one of the values is not string", 'Fix': warning_msg} except BadRequest: # if request is not in json format return status.HTTP_400_BAD_REQUEST, {"Error": "All mandatory fields are not provided in json format", 'Fix': warning_msg} return status.HTTP_200_OK, {"Success": "all ok"} @classmethod def commit_into_db ( cls, content, add_operation = True ): """ Safely commit the content into db :return: """ if add_operation: db.session.add ( content ) else: if content: db.session.delete ( content ) try: db.session.commit ( ) except AssertionError as err: db.session.rollback ( ) abort ( status.HTTP_409_CONFLICT, err ) except ( exc.IntegrityError, sqlite3.IntegrityError ) as err: db.session.rollback ( ) abort ( status.HTTP_409_CONFLICT, err.orig ) except Exception as err: db.session.rollback ( ) abort ( status.HTTP_500_INTERNAL_SERVER_ERROR, err ) finally: pass # db.session.close ( )
40.356436
118
0.562807
aceded88d17758a4cdf470d03ed0ff087cab9088
1,342
py
Python
test/backends/test_sklearn_backend.py
stefan-grafberger/mlinspect-cidr
4a15068a1652c043021d04795bd89da32ec20992
[ "Apache-2.0" ]
5
2020-08-26T13:32:13.000Z
2020-10-20T15:34:57.000Z
test/backends/test_sklearn_backend.py
stefan-grafberger/mlinspect-cidr
4a15068a1652c043021d04795bd89da32ec20992
[ "Apache-2.0" ]
1
2020-08-29T22:53:27.000Z
2020-08-29T22:53:27.000Z
test/backends/test_sklearn_backend.py
stefan-grafberger/mlinspect-cidr
4a15068a1652c043021d04795bd89da32ec20992
[ "Apache-2.0" ]
null
null
null
""" Tests whether the PipelineExecutor works """ import os from test.utils import run_random_annotation_testing_analyzer, run_row_index_annotation_testing_analyzer, \ run_multiple_test_analyzers from mlinspect.utils import get_project_root FILE_PY = os.path.join(str(get_project_root()), "test", "pipelines", "adult_easy.py") def test_sklearn_backend_random_annotation_propagation(): """ Tests whether the sklearn backend works """ with open(FILE_PY) as file: code = file.read() random_annotation_analyzer_result = run_random_annotation_testing_analyzer(code) assert len(random_annotation_analyzer_result) == 16 def test_sklearn_backend_row_index_annotation_propagation(): """ Tests whether the sklearn backend works """ with open(FILE_PY) as file: code = file.read() lineage_result = run_row_index_annotation_testing_analyzer(code) assert len(lineage_result) == 16 def test_sklearn_backend_annotation_propagation_multiple_analyzers(): """ Tests whether the sklearn backend works """ with open(FILE_PY) as file: code = file.read() analyzer_results, analyzers = run_multiple_test_analyzers(code) for analyzer in analyzers: result = analyzer_results[analyzer] assert len(result) == 16
29.173913
107
0.725037
acedede3e8d1b67004167475a1b8359d447c381b
3,781
py
Python
influxdb_client/domain/unsigned_integer_literal.py
MASIFAYUB/influxdb-client-python
a067fa5670a6fbc600db2ac4e54e29e1b7124998
[ "MIT" ]
null
null
null
influxdb_client/domain/unsigned_integer_literal.py
MASIFAYUB/influxdb-client-python
a067fa5670a6fbc600db2ac4e54e29e1b7124998
[ "MIT" ]
null
null
null
influxdb_client/domain/unsigned_integer_literal.py
MASIFAYUB/influxdb-client-python
a067fa5670a6fbc600db2ac4e54e29e1b7124998
[ "MIT" ]
null
null
null
# coding: utf-8 """ InfluxDB OSS API Service. The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501 OpenAPI spec version: 2.0.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from influxdb_client.domain.expression import Expression class UnsignedIntegerLiteral(Expression): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'type': 'str', 'value': 'str' } attribute_map = { 'type': 'type', 'value': 'value' } def __init__(self, type=None, value=None): # noqa: E501,D401,D403 """UnsignedIntegerLiteral - a model defined in OpenAPI.""" # noqa: E501 Expression.__init__(self) # noqa: E501 self._type = None self._value = None self.discriminator = None if type is not None: self.type = type if value is not None: self.value = value @property def type(self): """Get the type of this UnsignedIntegerLiteral. Type of AST node :return: The type of this UnsignedIntegerLiteral. :rtype: str """ # noqa: E501 return self._type @type.setter def type(self, type): """Set the type of this UnsignedIntegerLiteral. Type of AST node :param type: The type of this UnsignedIntegerLiteral. :type: str """ # noqa: E501 self._type = type @property def value(self): """Get the value of this UnsignedIntegerLiteral. :return: The value of this UnsignedIntegerLiteral. :rtype: str """ # noqa: E501 return self._value @value.setter def value(self, value): """Set the value of this UnsignedIntegerLiteral. :param value: The value of this UnsignedIntegerLiteral. :type: str """ # noqa: E501 self._value = value def to_dict(self): """Return the model properties as a dict.""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Return the string representation of the model.""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`.""" return self.to_str() def __eq__(self, other): """Return true if both objects are equal.""" if not isinstance(other, UnsignedIntegerLiteral): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Return true if both objects are not equal.""" return not self == other
27.007143
159
0.567575
acedeed0019b8e8e65fbf3b49ae4c8fee93705de
4,720
py
Python
projectq/setups/ibm.py
Lord-Awesome/ProjectQ
950073301f6886c634af2ab024f1d631f4abdea8
[ "Apache-2.0" ]
1
2019-08-29T19:04:27.000Z
2019-08-29T19:04:27.000Z
projectq/setups/ibm.py
sup0201/ProjectQ
ea99d74b4fa9eabecd9508ceb49daaede9bf6390
[ "Apache-2.0" ]
6
2019-01-27T17:05:25.000Z
2020-02-24T00:15:59.000Z
projectq/setups/ibm.py
sup0201/ProjectQ
ea99d74b4fa9eabecd9508ceb49daaede9bf6390
[ "Apache-2.0" ]
2
2021-01-22T12:09:14.000Z
2021-02-08T11:13:17.000Z
# Copyright 2017 ProjectQ-Framework (www.projectq.ch) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines a setup allowing to compile code for the IBM quantum chips: ->Any 5 qubit devices ->the ibmq online simulator ->the melbourne 15 qubit device It provides the `engine_list` for the `MainEngine' based on the requested device. Decompose the circuit into a Rx/Ry/Rz/H/CNOT gate set that will be translated in the backend in the U1/U2/U3/CX gate set. """ import projectq import projectq.setups.decompositions from projectq.setups import restrictedgateset from projectq.ops import (Rx, Ry, Rz, H, CNOT, Barrier) from projectq.cengines import (LocalOptimizer, IBM5QubitMapper, SwapAndCNOTFlipper, BasicMapperEngine, GridMapper) from projectq.backends._ibm._ibm_http_client import show_devices def get_engine_list(token=None, device=None): # Access to the hardware properties via show_devices # Can also be extended to take into account gate fidelities, new available # gate, etc.. devices = show_devices(token) ibm_setup = [] if device not in devices: raise DeviceOfflineError('Error when configuring engine list: device ' 'requested for Backend not connected') if devices[device]['nq'] == 5: # The requested device is a 5 qubit processor # Obtain the coupling map specific to the device coupling_map = devices[device]['coupling_map'] coupling_map = list2set(coupling_map) mapper = IBM5QubitMapper(coupling_map) ibm_setup = [ mapper, SwapAndCNOTFlipper(coupling_map), LocalOptimizer(10) ] elif device == 'ibmq_qasm_simulator': # The 32 qubit online simulator doesn't need a specific mapping for # gates. Can also run wider gateset but this setup keep the # restrictedgateset setup for coherence mapper = BasicMapperEngine() # Note: Manual Mapper doesn't work, because its map is updated only if # gates are applied if gates in the register are not used, then it # will lead to state errors res = dict() for i in range(devices[device]['nq']): res[i] = i mapper.current_mapping = res ibm_setup = [mapper] elif device == 'ibmq_16_melbourne': # Only 15 qubits available on this ibmqx2 unit(in particular qubit 7 # on the grid), therefore need custom grid mapping grid_to_physical = { 0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 15, 8: 14, 9: 13, 10: 12, 11: 11, 12: 10, 13: 9, 14: 8, 15: 7 } coupling_map = devices[device]['coupling_map'] coupling_map = list2set(coupling_map) ibm_setup = [ GridMapper(2, 8, grid_to_physical), LocalOptimizer(5), SwapAndCNOTFlipper(coupling_map), LocalOptimizer(5) ] else: # If there is an online device not handled into ProjectQ it's not too # bad, the engine_list can be constructed manually with the # appropriate mapper and the 'coupling_map' parameter raise DeviceNotHandledError('Device not yet fully handled by ProjectQ') # Most IBM devices accept U1,U2,U3,CX gates. 
# Most gates need to be decomposed into a subset that is manually converted # in the backend (until the implementation of the U1,U2,U3) # available gates decomposable now for U1,U2,U3: Rx,Ry,Rz and H setup = restrictedgateset.get_engine_list(one_qubit_gates=(Rx, Ry, Rz, H), two_qubit_gates=(CNOT, ), other_gates=(Barrier, )) setup.extend(ibm_setup) return setup class DeviceOfflineError(Exception): pass class DeviceNotHandledError(Exception): pass def list2set(coupling_list): result = [] for el in coupling_list: result.append(tuple(el)) return set(result)
37.165354
79
0.633051
acedef151d4528848adcaea46ad88bf6be5335d0
4,227
py
Python
integration/python/integration_api/models/bank_link_response_internal_object_vo.py
sumit4-ttn/SDK
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
[ "Apache-2.0" ]
null
null
null
integration/python/integration_api/models/bank_link_response_internal_object_vo.py
sumit4-ttn/SDK
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
[ "Apache-2.0" ]
null
null
null
integration/python/integration_api/models/bank_link_response_internal_object_vo.py
sumit4-ttn/SDK
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Hydrogen Integration API The Hydrogen Integration API # noqa: E501 OpenAPI spec version: 1.2.1 Contact: info@hydrogenplatform.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class BankLinkResponseInternalObjectVO(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'nucleus_bank_link': 'BankLink', 'status_code': 'int' } attribute_map = { 'nucleus_bank_link': 'nucleus_bank_link', 'status_code': 'status_code' } def __init__(self, nucleus_bank_link=None, status_code=None): # noqa: E501 """BankLinkResponseInternalObjectVO - a model defined in Swagger""" # noqa: E501 self._nucleus_bank_link = None self._status_code = None self.discriminator = None if nucleus_bank_link is not None: self.nucleus_bank_link = nucleus_bank_link if status_code is not None: self.status_code = status_code @property def nucleus_bank_link(self): """Gets the nucleus_bank_link of this BankLinkResponseInternalObjectVO. # noqa: E501 :return: The nucleus_bank_link of this BankLinkResponseInternalObjectVO. # noqa: E501 :rtype: BankLink """ return self._nucleus_bank_link @nucleus_bank_link.setter def nucleus_bank_link(self, nucleus_bank_link): """Sets the nucleus_bank_link of this BankLinkResponseInternalObjectVO. :param nucleus_bank_link: The nucleus_bank_link of this BankLinkResponseInternalObjectVO. # noqa: E501 :type: BankLink """ self._nucleus_bank_link = nucleus_bank_link @property def status_code(self): """Gets the status_code of this BankLinkResponseInternalObjectVO. # noqa: E501 :return: The status_code of this BankLinkResponseInternalObjectVO. # noqa: E501 :rtype: int """ return self._status_code @status_code.setter def status_code(self, status_code): """Sets the status_code of this BankLinkResponseInternalObjectVO. :param status_code: The status_code of this BankLinkResponseInternalObjectVO. # noqa: E501 :type: int """ self._status_code = status_code def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BankLinkResponseInternalObjectVO, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BankLinkResponseInternalObjectVO): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
29.767606
111
0.610599
acedef50a9dc7cad2e3b1d1a59bca9eaaaa49f6a
37,228
py
Python
sdk/python/pulumi_azure_nextgen/management/v20200601/outputs.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/management/v20200601/outputs.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/management/v20200601/outputs.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'AliasPathMetadataResponse', 'AliasPathResponse', 'AliasPatternResponse', 'AliasResponse', 'ApiProfileResponse', 'BasicDependencyResponse', 'DebugSettingResponse', 'DependencyResponse', 'DeploymentPropertiesExtendedResponse', 'ErrorAdditionalInfoResponse', 'ErrorResponseResponse', 'OnErrorDeploymentExtendedResponse', 'ParametersLinkResponse', 'ProviderResourceTypeResponse', 'ProviderResponse', 'ResourceReferenceResponse', 'TemplateLinkResponse', ] @pulumi.output_type class AliasPathMetadataResponse(dict): def __init__(__self__, *, attributes: str, type: str): """ :param str attributes: The attributes of the token that the alias path is referring to. :param str type: The type of the token that the alias path is referring to. """ pulumi.set(__self__, "attributes", attributes) pulumi.set(__self__, "type", type) @property @pulumi.getter def attributes(self) -> str: """ The attributes of the token that the alias path is referring to. """ return pulumi.get(self, "attributes") @property @pulumi.getter def type(self) -> str: """ The type of the token that the alias path is referring to. """ return pulumi.get(self, "type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class AliasPathResponse(dict): """ The type of the paths for alias. """ def __init__(__self__, *, metadata: 'outputs.AliasPathMetadataResponse', api_versions: Optional[Sequence[str]] = None, path: Optional[str] = None, pattern: Optional['outputs.AliasPatternResponse'] = None): """ The type of the paths for alias. :param 'AliasPathMetadataResponseArgs' metadata: The metadata of the alias path. If missing, fall back to the default metadata of the alias. :param Sequence[str] api_versions: The API versions. :param str path: The path of an alias. :param 'AliasPatternResponseArgs' pattern: The pattern for an alias path. """ pulumi.set(__self__, "metadata", metadata) if api_versions is not None: pulumi.set(__self__, "api_versions", api_versions) if path is not None: pulumi.set(__self__, "path", path) if pattern is not None: pulumi.set(__self__, "pattern", pattern) @property @pulumi.getter def metadata(self) -> 'outputs.AliasPathMetadataResponse': """ The metadata of the alias path. If missing, fall back to the default metadata of the alias. """ return pulumi.get(self, "metadata") @property @pulumi.getter(name="apiVersions") def api_versions(self) -> Optional[Sequence[str]]: """ The API versions. """ return pulumi.get(self, "api_versions") @property @pulumi.getter def path(self) -> Optional[str]: """ The path of an alias. """ return pulumi.get(self, "path") @property @pulumi.getter def pattern(self) -> Optional['outputs.AliasPatternResponse']: """ The pattern for an alias path. """ return pulumi.get(self, "pattern") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class AliasPatternResponse(dict): """ The type of the pattern for an alias path. """ def __init__(__self__, *, phrase: Optional[str] = None, type: Optional[str] = None, variable: Optional[str] = None): """ The type of the pattern for an alias path. :param str phrase: The alias pattern phrase. 
:param str type: The type of alias pattern :param str variable: The alias pattern variable. """ if phrase is not None: pulumi.set(__self__, "phrase", phrase) if type is not None: pulumi.set(__self__, "type", type) if variable is not None: pulumi.set(__self__, "variable", variable) @property @pulumi.getter def phrase(self) -> Optional[str]: """ The alias pattern phrase. """ return pulumi.get(self, "phrase") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of alias pattern """ return pulumi.get(self, "type") @property @pulumi.getter def variable(self) -> Optional[str]: """ The alias pattern variable. """ return pulumi.get(self, "variable") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class AliasResponse(dict): """ The alias type. """ def __init__(__self__, *, default_metadata: 'outputs.AliasPathMetadataResponse', default_path: Optional[str] = None, default_pattern: Optional['outputs.AliasPatternResponse'] = None, name: Optional[str] = None, paths: Optional[Sequence['outputs.AliasPathResponse']] = None, type: Optional[str] = None): """ The alias type. :param 'AliasPathMetadataResponseArgs' default_metadata: The default alias path metadata. Applies to the default path and to any alias path that doesn't have metadata :param str default_path: The default path for an alias. :param 'AliasPatternResponseArgs' default_pattern: The default pattern for an alias. :param str name: The alias name. :param Sequence['AliasPathResponseArgs'] paths: The paths for an alias. :param str type: The type of the alias. """ pulumi.set(__self__, "default_metadata", default_metadata) if default_path is not None: pulumi.set(__self__, "default_path", default_path) if default_pattern is not None: pulumi.set(__self__, "default_pattern", default_pattern) if name is not None: pulumi.set(__self__, "name", name) if paths is not None: pulumi.set(__self__, "paths", paths) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="defaultMetadata") def default_metadata(self) -> 'outputs.AliasPathMetadataResponse': """ The default alias path metadata. Applies to the default path and to any alias path that doesn't have metadata """ return pulumi.get(self, "default_metadata") @property @pulumi.getter(name="defaultPath") def default_path(self) -> Optional[str]: """ The default path for an alias. """ return pulumi.get(self, "default_path") @property @pulumi.getter(name="defaultPattern") def default_pattern(self) -> Optional['outputs.AliasPatternResponse']: """ The default pattern for an alias. """ return pulumi.get(self, "default_pattern") @property @pulumi.getter def name(self) -> Optional[str]: """ The alias name. """ return pulumi.get(self, "name") @property @pulumi.getter def paths(self) -> Optional[Sequence['outputs.AliasPathResponse']]: """ The paths for an alias. """ return pulumi.get(self, "paths") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of the alias. """ return pulumi.get(self, "type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ApiProfileResponse(dict): def __init__(__self__, *, api_version: str, profile_version: str): """ :param str api_version: The API version. :param str profile_version: The profile version. 
""" pulumi.set(__self__, "api_version", api_version) pulumi.set(__self__, "profile_version", profile_version) @property @pulumi.getter(name="apiVersion") def api_version(self) -> str: """ The API version. """ return pulumi.get(self, "api_version") @property @pulumi.getter(name="profileVersion") def profile_version(self) -> str: """ The profile version. """ return pulumi.get(self, "profile_version") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class BasicDependencyResponse(dict): """ Deployment dependency information. """ def __init__(__self__, *, id: Optional[str] = None, resource_name: Optional[str] = None, resource_type: Optional[str] = None): """ Deployment dependency information. :param str id: The ID of the dependency. :param str resource_name: The dependency resource name. :param str resource_type: The dependency resource type. """ if id is not None: pulumi.set(__self__, "id", id) if resource_name is not None: pulumi.set(__self__, "resource_name", resource_name) if resource_type is not None: pulumi.set(__self__, "resource_type", resource_type) @property @pulumi.getter def id(self) -> Optional[str]: """ The ID of the dependency. """ return pulumi.get(self, "id") @property @pulumi.getter(name="resourceName") def resource_name(self) -> Optional[str]: """ The dependency resource name. """ return pulumi.get(self, "resource_name") @property @pulumi.getter(name="resourceType") def resource_type(self) -> Optional[str]: """ The dependency resource type. """ return pulumi.get(self, "resource_type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class DebugSettingResponse(dict): """ The debug setting. """ def __init__(__self__, *, detail_level: Optional[str] = None): """ The debug setting. :param str detail_level: Specifies the type of information to log for debugging. The permitted values are none, requestContent, responseContent, or both requestContent and responseContent separated by a comma. The default is none. When setting this value, carefully consider the type of information you are passing in during deployment. By logging information about the request or response, you could potentially expose sensitive data that is retrieved through the deployment operations. """ if detail_level is not None: pulumi.set(__self__, "detail_level", detail_level) @property @pulumi.getter(name="detailLevel") def detail_level(self) -> Optional[str]: """ Specifies the type of information to log for debugging. The permitted values are none, requestContent, responseContent, or both requestContent and responseContent separated by a comma. The default is none. When setting this value, carefully consider the type of information you are passing in during deployment. By logging information about the request or response, you could potentially expose sensitive data that is retrieved through the deployment operations. """ return pulumi.get(self, "detail_level") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class DependencyResponse(dict): """ Deployment dependency information. """ def __init__(__self__, *, depends_on: Optional[Sequence['outputs.BasicDependencyResponse']] = None, id: Optional[str] = None, resource_name: Optional[str] = None, resource_type: Optional[str] = None): """ Deployment dependency information. :param Sequence['BasicDependencyResponseArgs'] depends_on: The list of dependencies. 
:param str id: The ID of the dependency. :param str resource_name: The dependency resource name. :param str resource_type: The dependency resource type. """ if depends_on is not None: pulumi.set(__self__, "depends_on", depends_on) if id is not None: pulumi.set(__self__, "id", id) if resource_name is not None: pulumi.set(__self__, "resource_name", resource_name) if resource_type is not None: pulumi.set(__self__, "resource_type", resource_type) @property @pulumi.getter(name="dependsOn") def depends_on(self) -> Optional[Sequence['outputs.BasicDependencyResponse']]: """ The list of dependencies. """ return pulumi.get(self, "depends_on") @property @pulumi.getter def id(self) -> Optional[str]: """ The ID of the dependency. """ return pulumi.get(self, "id") @property @pulumi.getter(name="resourceName") def resource_name(self) -> Optional[str]: """ The dependency resource name. """ return pulumi.get(self, "resource_name") @property @pulumi.getter(name="resourceType") def resource_type(self) -> Optional[str]: """ The dependency resource type. """ return pulumi.get(self, "resource_type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class DeploymentPropertiesExtendedResponse(dict): """ Deployment properties with additional details. """ def __init__(__self__, *, correlation_id: str, debug_setting: 'outputs.DebugSettingResponse', dependencies: Sequence['outputs.DependencyResponse'], duration: str, error: 'outputs.ErrorResponseResponse', mode: str, on_error_deployment: 'outputs.OnErrorDeploymentExtendedResponse', output_resources: Sequence['outputs.ResourceReferenceResponse'], outputs: Any, parameters: Any, parameters_link: 'outputs.ParametersLinkResponse', providers: Sequence['outputs.ProviderResponse'], provisioning_state: str, template_hash: str, template_link: 'outputs.TemplateLinkResponse', timestamp: str, validated_resources: Sequence['outputs.ResourceReferenceResponse']): """ Deployment properties with additional details. :param str correlation_id: The correlation ID of the deployment. :param 'DebugSettingResponseArgs' debug_setting: The debug setting of the deployment. :param Sequence['DependencyResponseArgs'] dependencies: The list of deployment dependencies. :param str duration: The duration of the template deployment. :param 'ErrorResponseResponseArgs' error: The deployment error. :param str mode: The deployment mode. Possible values are Incremental and Complete. :param 'OnErrorDeploymentExtendedResponseArgs' on_error_deployment: The deployment on error behavior. :param Sequence['ResourceReferenceResponseArgs'] output_resources: Array of provisioned resources. :param Any outputs: Key/value pairs that represent deployment output. :param Any parameters: Deployment parameters. :param 'ParametersLinkResponseArgs' parameters_link: The URI referencing the parameters. :param Sequence['ProviderResponseArgs'] providers: The list of resource providers needed for the deployment. :param str provisioning_state: Denotes the state of provisioning. :param str template_hash: The hash produced for the template. :param 'TemplateLinkResponseArgs' template_link: The URI referencing the template. :param str timestamp: The timestamp of the template deployment. :param Sequence['ResourceReferenceResponseArgs'] validated_resources: Array of validated resources. 
""" pulumi.set(__self__, "correlation_id", correlation_id) pulumi.set(__self__, "debug_setting", debug_setting) pulumi.set(__self__, "dependencies", dependencies) pulumi.set(__self__, "duration", duration) pulumi.set(__self__, "error", error) pulumi.set(__self__, "mode", mode) pulumi.set(__self__, "on_error_deployment", on_error_deployment) pulumi.set(__self__, "output_resources", output_resources) pulumi.set(__self__, "outputs", outputs) pulumi.set(__self__, "parameters", parameters) pulumi.set(__self__, "parameters_link", parameters_link) pulumi.set(__self__, "providers", providers) pulumi.set(__self__, "provisioning_state", provisioning_state) pulumi.set(__self__, "template_hash", template_hash) pulumi.set(__self__, "template_link", template_link) pulumi.set(__self__, "timestamp", timestamp) pulumi.set(__self__, "validated_resources", validated_resources) @property @pulumi.getter(name="correlationId") def correlation_id(self) -> str: """ The correlation ID of the deployment. """ return pulumi.get(self, "correlation_id") @property @pulumi.getter(name="debugSetting") def debug_setting(self) -> 'outputs.DebugSettingResponse': """ The debug setting of the deployment. """ return pulumi.get(self, "debug_setting") @property @pulumi.getter def dependencies(self) -> Sequence['outputs.DependencyResponse']: """ The list of deployment dependencies. """ return pulumi.get(self, "dependencies") @property @pulumi.getter def duration(self) -> str: """ The duration of the template deployment. """ return pulumi.get(self, "duration") @property @pulumi.getter def error(self) -> 'outputs.ErrorResponseResponse': """ The deployment error. """ return pulumi.get(self, "error") @property @pulumi.getter def mode(self) -> str: """ The deployment mode. Possible values are Incremental and Complete. """ return pulumi.get(self, "mode") @property @pulumi.getter(name="onErrorDeployment") def on_error_deployment(self) -> 'outputs.OnErrorDeploymentExtendedResponse': """ The deployment on error behavior. """ return pulumi.get(self, "on_error_deployment") @property @pulumi.getter(name="outputResources") def output_resources(self) -> Sequence['outputs.ResourceReferenceResponse']: """ Array of provisioned resources. """ return pulumi.get(self, "output_resources") @property @pulumi.getter def outputs(self) -> Any: """ Key/value pairs that represent deployment output. """ return pulumi.get(self, "outputs") @property @pulumi.getter def parameters(self) -> Any: """ Deployment parameters. """ return pulumi.get(self, "parameters") @property @pulumi.getter(name="parametersLink") def parameters_link(self) -> 'outputs.ParametersLinkResponse': """ The URI referencing the parameters. """ return pulumi.get(self, "parameters_link") @property @pulumi.getter def providers(self) -> Sequence['outputs.ProviderResponse']: """ The list of resource providers needed for the deployment. """ return pulumi.get(self, "providers") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ Denotes the state of provisioning. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="templateHash") def template_hash(self) -> str: """ The hash produced for the template. """ return pulumi.get(self, "template_hash") @property @pulumi.getter(name="templateLink") def template_link(self) -> 'outputs.TemplateLinkResponse': """ The URI referencing the template. """ return pulumi.get(self, "template_link") @property @pulumi.getter def timestamp(self) -> str: """ The timestamp of the template deployment. 
""" return pulumi.get(self, "timestamp") @property @pulumi.getter(name="validatedResources") def validated_resources(self) -> Sequence['outputs.ResourceReferenceResponse']: """ Array of validated resources. """ return pulumi.get(self, "validated_resources") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ErrorAdditionalInfoResponse(dict): """ The resource management error additional info. """ def __init__(__self__, *, info: Any, type: str): """ The resource management error additional info. :param Any info: The additional info. :param str type: The additional info type. """ pulumi.set(__self__, "info", info) pulumi.set(__self__, "type", type) @property @pulumi.getter def info(self) -> Any: """ The additional info. """ return pulumi.get(self, "info") @property @pulumi.getter def type(self) -> str: """ The additional info type. """ return pulumi.get(self, "type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ErrorResponseResponse(dict): """ Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.) """ def __init__(__self__, *, additional_info: Sequence['outputs.ErrorAdditionalInfoResponse'], code: str, details: Sequence['outputs.ErrorResponseResponse'], message: str, target: str): """ Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.) :param Sequence['ErrorAdditionalInfoResponseArgs'] additional_info: The error additional info. :param str code: The error code. :param Sequence['ErrorResponseResponseArgs'] details: The error details. :param str message: The error message. :param str target: The error target. """ pulumi.set(__self__, "additional_info", additional_info) pulumi.set(__self__, "code", code) pulumi.set(__self__, "details", details) pulumi.set(__self__, "message", message) pulumi.set(__self__, "target", target) @property @pulumi.getter(name="additionalInfo") def additional_info(self) -> Sequence['outputs.ErrorAdditionalInfoResponse']: """ The error additional info. """ return pulumi.get(self, "additional_info") @property @pulumi.getter def code(self) -> str: """ The error code. """ return pulumi.get(self, "code") @property @pulumi.getter def details(self) -> Sequence['outputs.ErrorResponseResponse']: """ The error details. """ return pulumi.get(self, "details") @property @pulumi.getter def message(self) -> str: """ The error message. """ return pulumi.get(self, "message") @property @pulumi.getter def target(self) -> str: """ The error target. """ return pulumi.get(self, "target") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class OnErrorDeploymentExtendedResponse(dict): """ Deployment on error behavior with additional details. """ def __init__(__self__, *, provisioning_state: str, deployment_name: Optional[str] = None, type: Optional[str] = None): """ Deployment on error behavior with additional details. :param str provisioning_state: The state of the provisioning for the on error deployment. :param str deployment_name: The deployment to be used on error case. :param str type: The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. 
""" pulumi.set(__self__, "provisioning_state", provisioning_state) if deployment_name is not None: pulumi.set(__self__, "deployment_name", deployment_name) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The state of the provisioning for the on error deployment. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="deploymentName") def deployment_name(self) -> Optional[str]: """ The deployment to be used on error case. """ return pulumi.get(self, "deployment_name") @property @pulumi.getter def type(self) -> Optional[str]: """ The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. """ return pulumi.get(self, "type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ParametersLinkResponse(dict): """ Entity representing the reference to the deployment parameters. """ def __init__(__self__, *, uri: str, content_version: Optional[str] = None): """ Entity representing the reference to the deployment parameters. :param str uri: The URI of the parameters file. :param str content_version: If included, must match the ContentVersion in the template. """ pulumi.set(__self__, "uri", uri) if content_version is not None: pulumi.set(__self__, "content_version", content_version) @property @pulumi.getter def uri(self) -> str: """ The URI of the parameters file. """ return pulumi.get(self, "uri") @property @pulumi.getter(name="contentVersion") def content_version(self) -> Optional[str]: """ If included, must match the ContentVersion in the template. """ return pulumi.get(self, "content_version") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ProviderResourceTypeResponse(dict): """ Resource type managed by the resource provider. """ def __init__(__self__, *, api_profiles: Sequence['outputs.ApiProfileResponse'], default_api_version: str, aliases: Optional[Sequence['outputs.AliasResponse']] = None, api_versions: Optional[Sequence[str]] = None, capabilities: Optional[str] = None, locations: Optional[Sequence[str]] = None, properties: Optional[Mapping[str, str]] = None, resource_type: Optional[str] = None): """ Resource type managed by the resource provider. :param Sequence['ApiProfileResponseArgs'] api_profiles: The API profiles for the resource provider. :param str default_api_version: The default API version. :param Sequence['AliasResponseArgs'] aliases: The aliases that are supported by this resource type. :param Sequence[str] api_versions: The API version. :param str capabilities: The additional capabilities offered by this resource type. :param Sequence[str] locations: The collection of locations where this resource type can be created. :param Mapping[str, str] properties: The properties. :param str resource_type: The resource type. 
""" pulumi.set(__self__, "api_profiles", api_profiles) pulumi.set(__self__, "default_api_version", default_api_version) if aliases is not None: pulumi.set(__self__, "aliases", aliases) if api_versions is not None: pulumi.set(__self__, "api_versions", api_versions) if capabilities is not None: pulumi.set(__self__, "capabilities", capabilities) if locations is not None: pulumi.set(__self__, "locations", locations) if properties is not None: pulumi.set(__self__, "properties", properties) if resource_type is not None: pulumi.set(__self__, "resource_type", resource_type) @property @pulumi.getter(name="apiProfiles") def api_profiles(self) -> Sequence['outputs.ApiProfileResponse']: """ The API profiles for the resource provider. """ return pulumi.get(self, "api_profiles") @property @pulumi.getter(name="defaultApiVersion") def default_api_version(self) -> str: """ The default API version. """ return pulumi.get(self, "default_api_version") @property @pulumi.getter def aliases(self) -> Optional[Sequence['outputs.AliasResponse']]: """ The aliases that are supported by this resource type. """ return pulumi.get(self, "aliases") @property @pulumi.getter(name="apiVersions") def api_versions(self) -> Optional[Sequence[str]]: """ The API version. """ return pulumi.get(self, "api_versions") @property @pulumi.getter def capabilities(self) -> Optional[str]: """ The additional capabilities offered by this resource type. """ return pulumi.get(self, "capabilities") @property @pulumi.getter def locations(self) -> Optional[Sequence[str]]: """ The collection of locations where this resource type can be created. """ return pulumi.get(self, "locations") @property @pulumi.getter def properties(self) -> Optional[Mapping[str, str]]: """ The properties. """ return pulumi.get(self, "properties") @property @pulumi.getter(name="resourceType") def resource_type(self) -> Optional[str]: """ The resource type. """ return pulumi.get(self, "resource_type") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ProviderResponse(dict): """ Resource provider information. """ def __init__(__self__, *, id: str, registration_policy: str, registration_state: str, resource_types: Sequence['outputs.ProviderResourceTypeResponse'], namespace: Optional[str] = None): """ Resource provider information. :param str id: The provider ID. :param str registration_policy: The registration policy of the resource provider. :param str registration_state: The registration state of the resource provider. :param Sequence['ProviderResourceTypeResponseArgs'] resource_types: The collection of provider resource types. :param str namespace: The namespace of the resource provider. """ pulumi.set(__self__, "id", id) pulumi.set(__self__, "registration_policy", registration_policy) pulumi.set(__self__, "registration_state", registration_state) pulumi.set(__self__, "resource_types", resource_types) if namespace is not None: pulumi.set(__self__, "namespace", namespace) @property @pulumi.getter def id(self) -> str: """ The provider ID. """ return pulumi.get(self, "id") @property @pulumi.getter(name="registrationPolicy") def registration_policy(self) -> str: """ The registration policy of the resource provider. """ return pulumi.get(self, "registration_policy") @property @pulumi.getter(name="registrationState") def registration_state(self) -> str: """ The registration state of the resource provider. 
""" return pulumi.get(self, "registration_state") @property @pulumi.getter(name="resourceTypes") def resource_types(self) -> Sequence['outputs.ProviderResourceTypeResponse']: """ The collection of provider resource types. """ return pulumi.get(self, "resource_types") @property @pulumi.getter def namespace(self) -> Optional[str]: """ The namespace of the resource provider. """ return pulumi.get(self, "namespace") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class ResourceReferenceResponse(dict): """ The resource Id model. """ def __init__(__self__, *, id: str): """ The resource Id model. :param str id: The fully qualified resource Id. """ pulumi.set(__self__, "id", id) @property @pulumi.getter def id(self) -> str: """ The fully qualified resource Id. """ return pulumi.get(self, "id") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class TemplateLinkResponse(dict): """ Entity representing the reference to the template. """ def __init__(__self__, *, content_version: Optional[str] = None, id: Optional[str] = None, relative_path: Optional[str] = None, uri: Optional[str] = None): """ Entity representing the reference to the template. :param str content_version: If included, must match the ContentVersion in the template. :param str id: The resource id of a Template Spec. Use either the id or uri property, but not both. :param str relative_path: Applicable only if this template link references a Template Spec. This relativePath property can optionally be used to reference a Template Spec artifact by path. :param str uri: The URI of the template to deploy. Use either the uri or id property, but not both. """ if content_version is not None: pulumi.set(__self__, "content_version", content_version) if id is not None: pulumi.set(__self__, "id", id) if relative_path is not None: pulumi.set(__self__, "relative_path", relative_path) if uri is not None: pulumi.set(__self__, "uri", uri) @property @pulumi.getter(name="contentVersion") def content_version(self) -> Optional[str]: """ If included, must match the ContentVersion in the template. """ return pulumi.get(self, "content_version") @property @pulumi.getter def id(self) -> Optional[str]: """ The resource id of a Template Spec. Use either the id or uri property, but not both. """ return pulumi.get(self, "id") @property @pulumi.getter(name="relativePath") def relative_path(self) -> Optional[str]: """ Applicable only if this template link references a Template Spec. This relativePath property can optionally be used to reference a Template Spec artifact by path. """ return pulumi.get(self, "relative_path") @property @pulumi.getter def uri(self) -> Optional[str]: """ The URI of the template to deploy. Use either the uri or id property, but not both. """ return pulumi.get(self, "uri") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
34.24839
495
0.627995
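A quick illustration of the Pulumi output classes excerpted above (ParametersLinkResponse, TemplateLinkResponse): in normal use these objects are produced by the Azure Resource Manager provider rather than built by hand, so the sketch below — which constructs them directly with placeholder URI/version values and assumes the generated file is importable as a module named `outputs` — is only a rough smoke test of the property accessors, not a recommended usage pattern.

# Rough sketch only: placeholder values; direct construction of output types is
# normally done by the provider/deserializer, not by user code.
import outputs  # assumption: the generated file above is importable under this name

params_link = outputs.ParametersLinkResponse(
    uri="https://example.com/params.json",    # placeholder parameters-file URI
    content_version="1.0.0.0",                # optional; must match the template's ContentVersion
)
template_link = outputs.TemplateLinkResponse(
    uri="https://example.com/template.json",  # use either uri or id, but not both
)

print(params_link.uri)                # -> https://example.com/params.json
print(template_link.content_version)  # -> None (not supplied)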
acedef74ce14a10ff13e4a19d0c9d051d181ee49
12,614
py
Python
Setup/setup_3c/TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
fairwell-coding/speech_synthesis
f2fd089d5bef87d2229df63da8614878e6ba1430
[ "Apache-2.0" ]
null
null
null
Setup/setup_3c/TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
fairwell-coding/speech_synthesis
f2fd089d5bef87d2229df63da8614878e6ba1430
[ "Apache-2.0" ]
null
null
null
Setup/setup_3c/TrainingInterfaces/Text_to_Spectrogram/Tacotron2/tacotron2_train_loop.py
fairwell-coding/speech_synthesis
f2fd089d5bef87d2229df63da8614878e6ba1430
[ "Apache-2.0" ]
null
null
null
import os import time import matplotlib.pyplot as plt import torch import torch.multiprocessing import torch.nn.functional as F from speechbrain.pretrained import EncoderClassifier from torch.cuda.amp import GradScaler from torch.cuda.amp import autocast from torch.nn.utils.rnn import pad_sequence from torch.utils.data.dataloader import DataLoader from tqdm import tqdm from Preprocessing.TextFrontend import TextFrontend from TrainingInterfaces.Text_to_Spectrogram.Tacotron2.AlignmentLoss import binarize_attention_parallel from Utility.utils import delete_old_checkpoints from Utility.utils import get_most_recent_checkpoint def plot_attention(model, lang, device, speaker_embedding, att_dir, step): tf = TextFrontend(language=lang, use_word_boundaries=False, use_explicit_eos=False) sentence = "" if lang == "en": sentence = "This is a complex sentence, it even has a pause!" elif lang == "de": sentence = "Dies ist ein komplexer Satz, er hat sogar eine Pause!" elif lang == "at-lab": sentence = "Aber die gibt es schon seit Jahrzehnten!" if lang == "at-lab": text = tf.string_to_tensor(sentence, path_to_wavfile="/IMS-Toucan/aridialect/aridialect_wav16000/spo_at_falter060401bis060630_001683.wav").long().squeeze(0).to(device) phones = tf.get_phone_string(sentence, path_to_wavfile="/IMS-Toucan/aridialect/aridialect_wav16000/spo_at_falter060401bis060630_001683.wav") else: text = tf.string_to_tensor(sentence).long().squeeze(0).to(device) phones = tf.get_phone_string(sentence) model.eval() att = model.inference(text=text, speaker_embeddings=speaker_embedding)[2].to("cpu") model.train() del tf bin_att = binarize_attention_parallel(att.unsqueeze(0).unsqueeze(1), in_lens=torch.LongTensor([len(text)]), out_lens=torch.LongTensor([len(att)])).squeeze(0).squeeze(0).detach().numpy() fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 9)) ax[0].imshow(att.detach().numpy(), interpolation='nearest', aspect='auto', origin="lower") ax[1].imshow(bin_att, interpolation='nearest', aspect='auto', origin="lower") ax[1].set_xlabel("Inputs") ax[0].xaxis.set_visible(False) ax[0].set_ylabel("Outputs") ax[1].set_ylabel("Outputs") ax[1].set_xticks(range(len(att[0]))) del att ax[1].set_xticklabels(labels=[phone for phone in phones]) ax[0].set_title("Soft-Attention") ax[1].set_title("Hard-Attention") fig.tight_layout() if not os.path.exists(os.path.join(att_dir, "attention_plots")): os.makedirs(os.path.join(att_dir, "attention_plots")) fig.savefig(os.path.join(os.path.join(att_dir, "attention_plots"), str(step) + ".png")) fig.clf() plt.close() def collate_and_pad(batch): if len(batch[0]) == 4: # every entry in batch: [text, text_length, spec, spec_length] return (pad_sequence([datapoint[0].squeeze(0) for datapoint in batch], batch_first=True), torch.stack([datapoint[1] for datapoint in batch]).squeeze(1), pad_sequence([datapoint[2] for datapoint in batch], batch_first=True), torch.stack([datapoint[3] for datapoint in batch]).squeeze(1)) elif len(batch[0]) == 5: # every entry in batch: [text, text_length, spec, spec_length, speaker_embedding] return (pad_sequence([datapoint[0].squeeze(0) for datapoint in batch], batch_first=True), torch.stack([datapoint[1] for datapoint in batch]).squeeze(1), pad_sequence([datapoint[2] for datapoint in batch], batch_first=True), torch.stack([datapoint[3] for datapoint in batch]).squeeze(1), torch.stack([datapoint[4] for datapoint in batch])) def train_loop(net, train_dataset, device, save_directory, batch_size, steps, epochs_per_save, lang, lr, use_speaker_embedding=False, path_to_checkpoint=None, 
fine_tune=False, collapse_margin=5.0, # be wary of loss scheduling resume=False, use_cycle_consistency_for_speakerembedding=False): """ Args: resume: whether to resume from the most recent checkpoint collapse_margin: margin in which the loss may increase in one epoch without triggering the soft-reset steps: How many steps to train lr: The initial learning rate for the optimiser path_to_checkpoint: reloads a checkpoint to continue training from there fine_tune: whether to load everything from a checkpoint, or only the model parameters lang: language of the synthesis use_speaker_embedding: whether to expect speaker embeddings net: Model to train train_dataset: Pytorch Dataset Object for train data device: Device to put the loaded tensors on save_directory: Where to save the checkpoints batch_size: How many elements should be loaded at once epochs_per_save: how many epochs to train in between checkpoints """ net = net.to(device) scaler = GradScaler() previous_error = 999999 # tacotron can collapse sometimes and requires soft-resets. This is to detect collapses. train_loader = DataLoader(batch_size=batch_size, dataset=train_dataset, drop_last=True, num_workers=10, pin_memory=True, shuffle=True, prefetch_factor=10, collate_fn=collate_and_pad, persistent_workers=True) if use_speaker_embedding: reference_speaker_embedding_for_att_plot = torch.Tensor(train_dataset[0][4]).to(device) if use_cycle_consistency_for_speakerembedding: speaker_embedding_func = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", run_opts={"device": str(device)}, savedir="Models/speechbrain_speaker_embedding_ecapa") else: reference_speaker_embedding_for_att_plot = None step_counter = 0 epoch = 0 net.train() optimizer = torch.optim.Adam(net.parameters(), lr=lr) if resume: path_to_checkpoint = get_most_recent_checkpoint(checkpoint_dir=save_directory) if path_to_checkpoint is not None: # careful when restarting, plotting data will be overwritten! 
check_dict = torch.load(os.path.join(path_to_checkpoint), map_location=device) net.load_state_dict(check_dict["model"]) if not fine_tune: optimizer.load_state_dict(check_dict["optimizer"]) scaler.load_state_dict(check_dict["scaler"]) step_counter = check_dict["step_counter"] start_time = time.time() while True: epoch += 1 optimizer.zero_grad() train_losses_this_epoch = list() for batch in tqdm(train_loader): with autocast(): if not use_speaker_embedding: train_loss = net(text=batch[0].to(device), text_lengths=batch[1].to(device), speech=batch[2].to(device), speech_lengths=batch[3].to(device), step=step_counter) else: if not use_cycle_consistency_for_speakerembedding: train_loss = net(text=batch[0].to(device), text_lengths=batch[1].to(device), speech=batch[2].to(device), speech_lengths=batch[3].to(device), step=step_counter, speaker_embeddings=batch[4].to(device)) else: train_loss, predicted_mels = net(text=batch[0].to(device), text_lengths=batch[1].to(device), speech=batch[2].to(device), speech_lengths=batch[3].to(device), step=step_counter, speaker_embeddings=batch[4].to(device), return_mels=True) pred_spemb = speaker_embedding_func.modules.embedding_model(predicted_mels, torch.tensor([x / len(predicted_mels[0]) for x in batch[3]])) gold_spemb = speaker_embedding_func.modules.embedding_model(batch[2].to(device), torch.tensor([x / len(batch[2][0]) for x in batch[3]])) # we have to recalculate the speaker embedding from our own mel because we project into a slightly different mel space cosine_cycle_distance = torch.tensor(1.0) - F.cosine_similarity(pred_spemb.squeeze(), gold_spemb.squeeze(), dim=1).mean() pairwise_cycle_distance = F.pairwise_distance(pred_spemb.squeeze(), gold_spemb.squeeze()).mean() cycle_distance = cosine_cycle_distance + pairwise_cycle_distance del pred_spemb del predicted_mels del gold_spemb train_loss = train_loss + cycle_distance * 5 train_losses_this_epoch.append(train_loss.item()) optimizer.zero_grad() scaler.scale(train_loss).backward() del train_loss step_counter += 1 scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(net.parameters(), 1.0, error_if_nonfinite=False) scaler.step(optimizer) scaler.update() with torch.no_grad(): net.eval() loss_this_epoch = sum(train_losses_this_epoch) / len(train_losses_this_epoch) if previous_error + collapse_margin < loss_this_epoch: print("Model Collapse detected! 
\nPrevious Loss: {}\nNew Loss: {}".format(previous_error, loss_this_epoch)) print("Trying to reset to a stable state ...") path_to_checkpoint = get_most_recent_checkpoint(checkpoint_dir=save_directory) check_dict = torch.load(path_to_checkpoint, map_location=device) net.load_state_dict(check_dict["model"]) if not fine_tune: optimizer.load_state_dict(check_dict["optimizer"]) step_counter = check_dict["step_counter"] scaler.load_state_dict(check_dict["scaler"]) else: previous_error = loss_this_epoch if epoch % epochs_per_save == 0: torch.save({ "model" : net.state_dict(), "optimizer" : optimizer.state_dict(), "scaler" : scaler.state_dict(), "step_counter": step_counter, }, os.path.join(save_directory, "checkpoint_{}.pt".format(step_counter))) delete_old_checkpoints(save_directory, keep=5) plot_attention(model=net, lang=lang, device=device, speaker_embedding=reference_speaker_embedding_for_att_plot, att_dir=save_directory, step=step_counter) if step_counter > steps: # DONE return print("Epoch: {}".format(epoch)) print("Train Loss: {}".format(loss_this_epoch)) print("Time elapsed: {} Minutes".format(round((time.time() - start_time) / 60))) print("Steps: {}".format(step_counter)) torch.cuda.empty_cache() net.train()
53.449153
175
0.575234
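The part of tacotron2_train_loop.py above that is easiest to miss is the collapse handling: if the epoch loss exceeds the previous epoch's loss by more than collapse_margin, the loop reloads the most recent checkpoint instead of continuing. A minimal stand-alone sketch of that soft-reset pattern (function and argument names here are placeholders, not part of the original file):

import torch

def soft_reset_if_collapsed(net, optimizer, previous_loss, loss_this_epoch,
                            checkpoint_path, collapse_margin=5.0, device="cpu"):
    """Reload the last stable checkpoint when the loss jumps by more than the margin."""
    if previous_loss + collapse_margin < loss_this_epoch:
        # collapse detected: restore model and optimizer from the last saved state
        check_dict = torch.load(checkpoint_path, map_location=device)
        net.load_state_dict(check_dict["model"])
        optimizer.load_state_dict(check_dict["optimizer"])
        return previous_loss   # keep the old loss as the reference point
    return loss_this_epoch     # accept this epoch's loss as the new reference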
acedf212fab17bf06c5f41d68dc5983c5b68699c
204
py
Python
pygef/__init__.py
RDWimmers/pygef
e13e25fa2327e87a1da9a5409acca6542e5ab8b0
[ "MIT" ]
14
2019-04-19T16:10:40.000Z
2021-08-18T15:13:23.000Z
pygef/__init__.py
RDWimmers/pygef
e13e25fa2327e87a1da9a5409acca6542e5ab8b0
[ "MIT" ]
79
2021-10-11T13:40:12.000Z
2022-03-31T10:26:47.000Z
pygef/__init__.py
RDWimmers/pygef
e13e25fa2327e87a1da9a5409acca6542e5ab8b0
[ "MIT" ]
15
2019-01-07T13:39:50.000Z
2021-08-11T13:51:29.000Z
from pygef._version import __version__ from pygef.bore import Bore from pygef.cpt import Cpt from pygef.plot_utils import plot_merged_cpt_bore from pygef.utils import depth_to_nap, join_gef, nap_to_depth
34
60
0.857843
acedf229bdb2ef56f42aa90a8431e4a013cfe285
14,316
py
Python
homeassistant/components/onvif/camera.py
CoMPaTech/home-assistant
f975654ae7fa03dbec42b49835c08db414d0c738
[ "Apache-2.0" ]
1
2021-01-10T05:35:53.000Z
2021-01-10T05:35:53.000Z
homeassistant/components/onvif/camera.py
CoMPaTech/home-assistant
f975654ae7fa03dbec42b49835c08db414d0c738
[ "Apache-2.0" ]
null
null
null
homeassistant/components/onvif/camera.py
CoMPaTech/home-assistant
f975654ae7fa03dbec42b49835c08db414d0c738
[ "Apache-2.0" ]
1
2020-02-24T16:17:42.000Z
2020-02-24T16:17:42.000Z
"""Support for ONVIF Cameras with FFmpeg as decoder.""" import asyncio import datetime as dt import logging import os from typing import Optional from aiohttp.client_exceptions import ClientConnectionError, ServerDisconnectedError from haffmpeg.camera import CameraMjpeg from haffmpeg.tools import IMAGE_JPEG, ImageFrame import onvif from onvif import ONVIFCamera, exceptions import voluptuous as vol from zeep.exceptions import Fault from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera from homeassistant.components.camera.const import DOMAIN from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, DATA_FFMPEG from homeassistant.const import ( ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, ) from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream import homeassistant.helpers.config_validation as cv from homeassistant.helpers.service import async_extract_entity_ids import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "ONVIF Camera" DEFAULT_PORT = 5000 DEFAULT_USERNAME = "admin" DEFAULT_PASSWORD = "888888" DEFAULT_ARGUMENTS = "-pred 1" DEFAULT_PROFILE = 0 CONF_PROFILE = "profile" ATTR_PAN = "pan" ATTR_TILT = "tilt" ATTR_ZOOM = "zoom" DIR_UP = "UP" DIR_DOWN = "DOWN" DIR_LEFT = "LEFT" DIR_RIGHT = "RIGHT" ZOOM_OUT = "ZOOM_OUT" ZOOM_IN = "ZOOM_IN" PTZ_NONE = "NONE" SERVICE_PTZ = "onvif_ptz" ONVIF_DATA = "onvif" ENTITIES = "entities" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string, vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_EXTRA_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string, vol.Optional(CONF_PROFILE, default=DEFAULT_PROFILE): vol.All( vol.Coerce(int), vol.Range(min=0) ), } ) SERVICE_PTZ_SCHEMA = vol.Schema( { ATTR_ENTITY_ID: cv.entity_ids, ATTR_PAN: vol.In([DIR_LEFT, DIR_RIGHT, PTZ_NONE]), ATTR_TILT: vol.In([DIR_UP, DIR_DOWN, PTZ_NONE]), ATTR_ZOOM: vol.In([ZOOM_OUT, ZOOM_IN, PTZ_NONE]), } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up a ONVIF camera.""" _LOGGER.debug("Setting up the ONVIF camera platform") async def async_handle_ptz(service): """Handle PTZ service call.""" pan = service.data.get(ATTR_PAN, None) tilt = service.data.get(ATTR_TILT, None) zoom = service.data.get(ATTR_ZOOM, None) all_cameras = hass.data[ONVIF_DATA][ENTITIES] entity_ids = await async_extract_entity_ids(hass, service) target_cameras = [] if not entity_ids: target_cameras = all_cameras else: target_cameras = [ camera for camera in all_cameras if camera.entity_id in entity_ids ] for camera in target_cameras: await camera.async_perform_ptz(pan, tilt, zoom) hass.services.async_register( DOMAIN, SERVICE_PTZ, async_handle_ptz, schema=SERVICE_PTZ_SCHEMA ) _LOGGER.debug("Constructing the ONVIFHassCamera") hass_camera = ONVIFHassCamera(hass, config) await hass_camera.async_initialize() async_add_entities([hass_camera]) return class ONVIFHassCamera(Camera): """An implementation of an ONVIF camera.""" def __init__(self, hass, config): """Initialize an ONVIF camera.""" super().__init__() _LOGGER.debug("Importing dependencies") _LOGGER.debug("Setting up the ONVIF camera component") self._username = config.get(CONF_USERNAME) self._password = 
config.get(CONF_PASSWORD) self._host = config.get(CONF_HOST) self._port = config.get(CONF_PORT) self._name = config.get(CONF_NAME) self._ffmpeg_arguments = config.get(CONF_EXTRA_ARGUMENTS) self._profile_index = config.get(CONF_PROFILE) self._ptz_service = None self._input = None self._mac = None _LOGGER.debug( "Setting up the ONVIF camera device @ '%s:%s'", self._host, self._port ) self._camera = ONVIFCamera( self._host, self._port, self._username, self._password, "{}/wsdl/".format(os.path.dirname(onvif.__file__)), ) async def async_initialize(self): """ Initialize the camera. Initializes the camera by obtaining the input uri and connecting to the camera. Also retrieves the ONVIF profiles. """ try: _LOGGER.debug("Updating service addresses") await self._camera.update_xaddrs() await self.async_obtain_mac_address() await self.async_check_date_and_time() await self.async_obtain_input_uri() self.setup_ptz() except ClientConnectionError as err: _LOGGER.warning( "Couldn't connect to camera '%s', but will retry later. Error: %s", self._name, err, ) raise PlatformNotReady except Fault as err: _LOGGER.error( "Couldn't connect to camera '%s', please verify " "that the credentials are correct. Error: %s", self._name, err, ) async def async_obtain_mac_address(self): """Obtain the MAC address of the camera to use as the unique ID.""" devicemgmt = self._camera.create_devicemgmt_service() network_interfaces = await devicemgmt.GetNetworkInterfaces() for interface in network_interfaces: if interface.Enabled: self._mac = interface.Info.HwAddress async def async_check_date_and_time(self): """Warns if camera and system date not synced.""" _LOGGER.debug("Setting up the ONVIF device management service") devicemgmt = self._camera.create_devicemgmt_service() _LOGGER.debug("Retrieving current camera date/time") try: system_date = dt_util.utcnow() device_time = await devicemgmt.GetSystemDateAndTime() if not device_time: _LOGGER.debug( """Couldn't get camera '%s' date/time. GetSystemDateAndTime() return null/empty""", self._name, ) return if device_time.UTCDateTime: tzone = dt_util.UTC cdate = device_time.UTCDateTime else: tzone = ( dt_util.get_time_zone(device_time.TimeZone) or dt_util.DEFAULT_TIME_ZONE ) cdate = device_time.LocalDateTime if cdate is None: _LOGGER.warning("Could not retrieve date/time on this camera") else: cam_date = dt.datetime( cdate.Date.Year, cdate.Date.Month, cdate.Date.Day, cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second, 0, tzone, ) cam_date_utc = cam_date.astimezone(dt_util.UTC) _LOGGER.debug("TimeZone for date/time: %s", tzone) _LOGGER.debug("Camera date/time: %s", cam_date) _LOGGER.debug("Camera date/time in UTC: %s", cam_date_utc) _LOGGER.debug("System date/time: %s", system_date) dt_diff = cam_date - system_date dt_diff_seconds = dt_diff.total_seconds() if dt_diff_seconds > 5: _LOGGER.warning( "The date/time on the camera (UTC) is '%s', " "which is different from the system '%s', " "this could lead to authentication issues", cam_date_utc, system_date, ) except ServerDisconnectedError as err: _LOGGER.warning( "Couldn't get camera '%s' date/time. 
Error: %s", self._name, err ) async def async_obtain_input_uri(self): """Set the input uri for the camera.""" _LOGGER.debug( "Connecting with ONVIF Camera: %s on port %s", self._host, self._port ) try: _LOGGER.debug("Retrieving profiles") media_service = self._camera.create_media_service() profiles = await media_service.GetProfiles() _LOGGER.debug("Retrieved '%d' profiles", len(profiles)) if self._profile_index >= len(profiles): _LOGGER.warning( "ONVIF Camera '%s' doesn't provide profile %d." " Using the last profile.", self._name, self._profile_index, ) self._profile_index = -1 _LOGGER.debug("Using profile index '%d'", self._profile_index) _LOGGER.debug("Retrieving stream uri") # Fix Onvif setup error on Goke GK7102 based IP camera # where we need to recreate media_service #26781 media_service = self._camera.create_media_service() req = media_service.create_type("GetStreamUri") req.ProfileToken = profiles[self._profile_index].token req.StreamSetup = { "Stream": "RTP-Unicast", "Transport": {"Protocol": "RTSP"}, } stream_uri = await media_service.GetStreamUri(req) uri_no_auth = stream_uri.Uri uri_for_log = uri_no_auth.replace("rtsp://", "rtsp://<user>:<password>@", 1) self._input = uri_no_auth.replace( "rtsp://", f"rtsp://{self._username}:{self._password}@", 1 ) _LOGGER.debug( "ONVIF Camera Using the following URL for %s: %s", self._name, uri_for_log, ) except exceptions.ONVIFError as err: _LOGGER.error("Couldn't setup camera '%s'. Error: %s", self._name, err) def setup_ptz(self): """Set up PTZ if available.""" _LOGGER.debug("Setting up the ONVIF PTZ service") if self._camera.get_service("ptz", create=False) is None: _LOGGER.debug("PTZ is not available") else: self._ptz_service = self._camera.create_ptz_service() _LOGGER.debug("Completed set up of the ONVIF camera component") async def async_perform_ptz(self, pan, tilt, zoom): """Perform a PTZ action on the camera.""" if self._ptz_service is None: _LOGGER.warning("PTZ actions are not supported on camera '%s'", self._name) return if self._ptz_service: pan_val = 1 if pan == DIR_RIGHT else -1 if pan == DIR_LEFT else 0 tilt_val = 1 if tilt == DIR_UP else -1 if tilt == DIR_DOWN else 0 zoom_val = 1 if zoom == ZOOM_IN else -1 if zoom == ZOOM_OUT else 0 req = { "Velocity": { "PanTilt": {"_x": pan_val, "_y": tilt_val}, "Zoom": {"_x": zoom_val}, } } try: _LOGGER.debug( "Calling PTZ | Pan = %d | Tilt = %d | Zoom = %d", pan_val, tilt_val, zoom_val, ) await self._ptz_service.ContinuousMove(req) except exceptions.ONVIFError as err: if "Bad Request" in err.reason: self._ptz_service = None _LOGGER.debug("Camera '%s' doesn't support PTZ.", self._name) else: _LOGGER.debug("Camera '%s' doesn't support PTZ.", self._name) async def async_added_to_hass(self): """Handle entity addition to hass.""" _LOGGER.debug("Camera '%s' added to hass", self._name) if ONVIF_DATA not in self.hass.data: self.hass.data[ONVIF_DATA] = {} self.hass.data[ONVIF_DATA][ENTITIES] = [] self.hass.data[ONVIF_DATA][ENTITIES].append(self) async def async_camera_image(self): """Return a still image response from the camera.""" _LOGGER.debug("Retrieving image from camera '%s'", self._name) ffmpeg = ImageFrame(self.hass.data[DATA_FFMPEG].binary, loop=self.hass.loop) image = await asyncio.shield( ffmpeg.get_image( self._input, output_format=IMAGE_JPEG, extra_cmd=self._ffmpeg_arguments ) ) return image async def handle_async_mjpeg_stream(self, request): """Generate an HTTP MJPEG stream from the camera.""" _LOGGER.debug("Handling mjpeg stream from camera '%s'", self._name) ffmpeg_manager = 
self.hass.data[DATA_FFMPEG] stream = CameraMjpeg(ffmpeg_manager.binary, loop=self.hass.loop) await stream.open_camera(self._input, extra_cmd=self._ffmpeg_arguments) try: stream_reader = await stream.get_reader() return await async_aiohttp_proxy_stream( self.hass, request, stream_reader, ffmpeg_manager.ffmpeg_stream_content_type, ) finally: await stream.close() @property def supported_features(self): """Return supported features.""" if self._input: return SUPPORT_STREAM return 0 async def stream_source(self): """Return the stream source.""" return self._input @property def name(self): """Return the name of this camera.""" return self._name @property def unique_id(self) -> Optional[str]: """Return a unique ID.""" return self._mac
34.330935
88
0.594021
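The ONVIF camera platform above registers an onvif_ptz service whose pan/tilt/zoom fields must come from the fixed value sets in SERVICE_PTZ_SCHEMA. A hedged sketch of triggering it from inside Home Assistant — the entity id is a placeholder, and hass is the running Home Assistant core object handed to an integration:

async def pan_front_door_left(hass):
    # "camera.front_door" is a made-up entity id for illustration.
    await hass.services.async_call(
        "camera",       # DOMAIN under which SERVICE_PTZ is registered
        "onvif_ptz",    # SERVICE_PTZ
        {
            "entity_id": "camera.front_door",
            "pan": "LEFT",    # DIR_LEFT; alternatives: "RIGHT", "NONE"
            "tilt": "NONE",   # "UP", "DOWN", "NONE"
            "zoom": "NONE",   # "ZOOM_IN", "ZOOM_OUT", "NONE"
        },
    )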
acedf290fed2c1e66150df07cfb61a81a2554b36
2,464
py
Python
ros/robot_guidance/robot_guidance/scripts/reinforcement_learning.py
open-rdc/RobotGuidance
16cfecd23fb81f94ec653c9537e794abb114a175
[ "MIT" ]
2
2018-01-21T23:12:22.000Z
2020-02-03T14:02:54.000Z
ros/robot_guidance/robot_guidance/scripts/reinforcement_learning.py
open-rdc/RobotGuidance
16cfecd23fb81f94ec653c9537e794abb114a175
[ "MIT" ]
31
2017-08-07T06:07:05.000Z
2019-08-07T10:44:51.000Z
ros/robot_guidance/robot_guidance/scripts/reinforcement_learning.py
open-rdc/RobotGuidance
16cfecd23fb81f94ec653c9537e794abb114a175
[ "MIT" ]
1
2020-03-05T08:31:56.000Z
2020-03-05T08:31:56.000Z
import chainerrl import chainer import chainer.functions as F import chainer.links as L import numpy as np import os from os.path import expanduser class QFunction(chainer.Chain): def __init__(self, n_history=3, n_action=4): initializer = chainer.initializers.HeNormal() super(QFunction, self).__init__( conv1=L.Convolution2D(n_history, 32, ksize=8, stride=4, nobias=False, initialW=initializer), conv2=L.Convolution2D(32, 64, ksize=3, stride=2, nobias=False, initialW=initializer), conv3=L.Convolution2D(64, 64, ksize=3, stride=1, nobias=False, initialW=initializer), conv4=L.Linear(960, 512, initialW=initializer), fc5=L.Linear(512, n_action, initialW=np.zeros((n_action, 512), dtype=np.float32)) ) def __call__(self, x, test=False): s = chainer.Variable(x) h1 = F.relu(self.conv1(s)) h2 = F.relu(self.conv2(h1)) h3 = F.relu(self.conv3(h2)) h4 = F.relu(self.conv4(h3)) h5 = self.fc5(h4) h = chainerrl.action_value.DiscreteActionValue(h5) return h class reinforcement_learning: def __init__(self, n_history=3, n_action=4): self.q_func = QFunction(n_history, n_action) try: self.q_func.to_gpu() except: print("No GPU") self.optimizer = chainer.optimizers.Adam(eps=1e-2) self.optimizer.setup(self.q_func) self.gamma = 0.95 self.n_action = n_action self.explorer = chainerrl.explorers.ConstantEpsilonGreedy( epsilon=0.1, random_action_func=self.action_space_sample) self.replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=10 ** 4) self.phi = lambda x: x.astype(np.float32, copy=False) self.agent = chainerrl.agents.DoubleDQN( self.q_func, self.optimizer, self.replay_buffer, self.gamma, self.explorer, minibatch_size=4, replay_start_size=100, update_interval=1, target_update_interval=100, phi=self.phi) home = expanduser("~") if os.path.isdir(home + '/agent'): self.agent.load('agent') print('agent LOADED!!') def act_and_trains(self, obs, reward): self.action = self.agent.act_and_train(obs, reward) return self.action def stop_episode_and_train(self, obs, reward, done): self.action = self.agent.stop_episode_and_train(obs, reward, done) return self.action def act(self, obs): self.action = self.agent.act(obs) return self.action def save_agent(self): self.agent.save('agent') print("agent SAVED!!") def action_space_sample(self): return np.random.randint(1,self.n_action) if __name__ == '__main__': rl = reinforcement_learning()
33.297297
95
0.735795
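The reinforcement_learning wrapper above hides the ChainerRL DoubleDQN behind three calls: act_and_trains during an episode, stop_episode_and_train at the end, and save_agent to persist. The smoke-test loop below feeds random frames; the 3x48x64 observation shape is inferred from the Linear(960, 512) layer in QFunction (the conv stack reduces 48x64 frames to 64 channels x 3 x 5 = 960 features), and the reward values are placeholders rather than anything from the original ROS node.

import numpy as np
from reinforcement_learning import reinforcement_learning  # the script shown above, assumed importable

rl = reinforcement_learning(n_history=3, n_action=4)

for episode in range(3):
    reward = 0.0
    for step in range(20):
        obs = np.random.rand(3, 48, 64).astype(np.float32)  # placeholder stacked frames
        reward = 0.0                                         # placeholder reward signal
        action = rl.act_and_trains(obs, reward)              # act and update the agent
    # close the episode with a final observation / reward
    rl.stop_episode_and_train(np.zeros((3, 48, 64), dtype=np.float32), 1.0, done=True)

rl.save_agent()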
acedf3e4fcab88bc1cc87d9f582f0606e0ecd784
4,091
py
Python
venv/Lib/site-packages/networkx/algorithms/centrality/percolation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
10,024
2015-01-01T13:06:43.000Z
2022-03-31T12:45:25.000Z
venv/Lib/site-packages/networkx/algorithms/centrality/percolation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
3,191
2015-01-01T18:13:11.000Z
2022-03-31T22:06:00.000Z
venv/Lib/site-packages/networkx/algorithms/centrality/percolation.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
3,272
2015-01-01T05:04:53.000Z
2022-03-31T17:46:35.000Z
"""Percolation centrality measures.""" import networkx as nx from networkx.algorithms.centrality.betweenness import ( _single_source_dijkstra_path_basic as dijkstra, ) from networkx.algorithms.centrality.betweenness import ( _single_source_shortest_path_basic as shortest_path, ) __all__ = ["percolation_centrality"] def percolation_centrality(G, attribute="percolation", states=None, weight=None): r"""Compute the percolation centrality for nodes. Percolation centrality of a node $v$, at a given time, is defined as the proportion of ‘percolated paths’ that go through that node. This measure quantifies relative impact of nodes based on their topological connectivity, as well as their percolation states. Percolation states of nodes are used to depict network percolation scenarios (such as during infection transmission in a social network of individuals, spreading of computer viruses on computer networks, or transmission of disease over a network of towns) over time. In this measure usually the percolation state is expressed as a decimal between 0.0 and 1.0. When all nodes are in the same percolated state this measure is equivalent to betweenness centrality. Parameters ---------- G : graph A NetworkX graph. attribute : None or string, optional (default='percolation') Name of the node attribute to use for percolation state, used if `states` is None. states : None or dict, optional (default=None) Specify percolation states for the nodes, nodes as keys states as values. weight : None or string, optional (default=None) If None, all edge weights are considered equal. Otherwise holds the name of the edge attribute used as weight. The weight of an edge is treated as the length or distance between the two sides. Returns ------- nodes : dictionary Dictionary of nodes with percolation centrality as the value. See Also -------- betweenness_centrality Notes ----- The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and Liaquat Hossain [1]_ Pair dependecies are calculated and accumulated using [2]_ For weighted graphs the edge weights must be greater than zero. Zero edge weights can produce an infinite number of equal length paths between pairs of nodes. References ---------- .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes during Percolation in Networks http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095 .. [2] Ulrik Brandes: A Faster Algorithm for Betweenness Centrality. Journal of Mathematical Sociology 25(2):163-177, 2001. 
https://doi.org/10.1080/0022250X.2001.9990249 """ percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G nodes = G if states is None: states = nx.get_node_attributes(nodes, attribute) # sum of all percolation states p_sigma_x_t = 0.0 for v in states.values(): p_sigma_x_t += v for s in nodes: # single source shortest paths if weight is None: # use BFS S, P, sigma, _ = shortest_path(G, s) else: # use Dijkstra's algorithm S, P, sigma, _ = dijkstra(G, s, weight) # accumulation percolation = _accumulate_percolation( percolation, G, S, P, sigma, s, states, p_sigma_x_t ) n = len(G) for v in percolation: percolation[v] *= 1 / (n - 2) return percolation def _accumulate_percolation(percolation, G, S, P, sigma, s, states, p_sigma_x_t): delta = dict.fromkeys(S, 0) while S: w = S.pop() coeff = (1 + delta[w]) / sigma[w] for v in P[w]: delta[v] += sigma[v] * coeff if w != s: # percolation weight pw_s_w = states[s] / (p_sigma_x_t - states[w]) percolation[w] += delta[w] * pw_s_w return percolation
32.468254
87
0.67123
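A short usage sketch for percolation_centrality from the networkx file above; the graph and the per-node percolation states are made up for illustration (node 0 is treated as fully percolated, e.g. infected).

import networkx as nx

G = nx.path_graph(5)  # 0 - 1 - 2 - 3 - 4
states = {0: 1.0, 1: 0.2, 2: 0.0, 3: 0.0, 4: 0.0}

# Pass explicit states; alternatively store them as a "percolation" node attribute.
pc = nx.percolation_centrality(G, states=states)
for node, value in sorted(pc.items()):
    print(node, round(value, 3))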
acedf3f9022519bc2bbeb8b5c858dab4e48d6add
1,704
py
Python
pyramid_oereb/core/sources/real_estate.py
arnaud-morvan/pyramid_oereb
d28348fda0f573eb8156874320270ca93b67bbf5
[ "BSD-2-Clause" ]
null
null
null
pyramid_oereb/core/sources/real_estate.py
arnaud-morvan/pyramid_oereb
d28348fda0f573eb8156874320270ca93b67bbf5
[ "BSD-2-Clause" ]
null
null
null
pyramid_oereb/core/sources/real_estate.py
arnaud-morvan/pyramid_oereb
d28348fda0f573eb8156874320270ca93b67bbf5
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from pyramid_oereb.core.sources import Base
from pyramid_oereb.core.records.real_estate import RealEstateRecord


class RealEstateBaseSource(Base):
    """
    Base class for real estate sources.

    Attributes:
        records (list of pyramid_oereb.lib.records.real_estate.RealEstateRecord): List of real estate
            records.
    """
    _record_class_ = RealEstateRecord

    def read(self, params, nb_ident=None, number=None, egrid=None, geometry=None):
        """
        Every real estate source has to implement a read method. This method must accept the four keyword
        parameters. If you want to adapt to your own source for real estates, this is the point to hook
        in.

        Args:
            params (pyramid_oereb.views.webservice.Parameter): The parameters of the extract request.
            nb_ident (int or None): The identification number of the desired real estate. This parameter is
                directly related to the number parameter and both must be set!
                Combination must deliver only one result or must raise an error.
            number (str or None): The number of the parcel, also known as the real estate. This parameter is
                directly related to the nb_ident parameter and both must be set!
                Combination must deliver only one result or must raise an error.
            egrid (str or None): The unique identifier of the desired real estate. This must deliver only one
                result or must raise an error.
            geometry (str): A geometry as WKT string which is used to obtain intersected real estates. This
                may deliver several results.
        """
        pass  # pragma: no cover
50.117647
110
0.680751
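The base class in the pyramid_oereb file above only fixes the read() contract; concrete sources are expected to fill self.records with RealEstateRecord instances. A schematic, hypothetical subclass might look like the following — the class name, in-memory storage, and lookup logic are all illustrative, not part of the library, and whether the underlying Base class needs additional constructor arguments is not visible in the excerpt.

from pyramid_oereb.core.sources.real_estate import RealEstateBaseSource


class InMemoryRealEstateSource(RealEstateBaseSource):
    """Toy source resolving real estates from a pre-loaded dict keyed by EGRID."""

    def __init__(self, records_by_egrid):
        # records_by_egrid: {egrid: RealEstateRecord}
        self._records_by_egrid = records_by_egrid
        self.records = []

    def read(self, params, nb_ident=None, number=None, egrid=None, geometry=None):
        # Only the EGRID lookup is sketched; nb_ident/number pairs and the WKT
        # geometry intersection would need their own handling in a real source.
        if egrid is not None and egrid in self._records_by_egrid:
            self.records = [self._records_by_egrid[egrid]]
        else:
            self.records = []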
acedf402aee903507da28b14fdf357a8d20085fc
10,272
py
Python
solvers/gpt3/lm_solve/run.py
tiendat101001/PythonProgrammingPuzzles
e4a6504bf783ad1ab93686cedd5d1818af92a5e4
[ "MIT" ]
814
2021-06-03T20:07:59.000Z
2022-03-25T09:31:32.000Z
solvers/gpt3/lm_solve/run.py
xu753x/PythonProgrammingPuzzles
506099b8664db2ddefaf1c41cb151743b3751ab3
[ "MIT" ]
16
2021-06-11T18:30:34.000Z
2021-09-24T03:48:10.000Z
solvers/gpt3/lm_solve/run.py
xu753x/PythonProgrammingPuzzles
506099b8664db2ddefaf1c41cb151743b3751ab3
[ "MIT" ]
78
2021-06-11T17:17:14.000Z
2022-02-14T06:47:40.000Z
""" A simple (non-bootstrap) version where you give a (possibly empty) tutorial. """ from typing import List, Tuple, Dict from collections import Counter import utils import ast import time import re import astor import json import random from tqdm import tqdm from . import gpt3_lib from . import judge def get_prompts(prefix, fs, test_prefix=True): """adds function numbers after prompt""" ans = [] if test_prefix: if "---" in prefix: for p in prefix.split("---"): exec(p) if "def f1(" in prefix: i = 1 while f"def f{i}(" in prefix: i += 1 else: i = "" for f in fs: f = f.replace("def f(", f"def f{i}(") ans.append(f"{prefix}{f}\n\nassert True == f{i}(") return ans def strip_param_annotations(f): a = ast.parse(f) args = a.body[0].args.args for arg in args[1:]: arg.annotation = None new_f = astor.to_source(a, pretty_source= lambda source: ''.join(astor.source_repr.split_lines(source, maxline=10 ** 10))) line_0 = new_f.strip().split("\n")[0] return "\n".join([line_0] + f.strip().split("\n")[1:]) def load_puzzles(filename, add_docstring=False): JS = utils.load_json(filename) ans = [] seen = set() for j in JS: name = "_".join(j["name"].split("_")[:-1]) if name in seen: continue seen.add(name) ft = j["sat"].replace("def sat", "def f") f = "\n".join(ft.split("\n")[:1] + ft.split("\n")[2:]) # remove assert if add_docstring: desc = j["desc"] desc = "\n".join(" " * 4 + line if i else line for i, line in enumerate(desc.split("\n"))) # add indent f = "\n".join(f.split("\n")[:1] + [f' """{desc}"""'] + f.split("\n")[1:]) f = strip_param_annotations(f) ans.append((ft, f)) return ans def find_close_paren(st): """Takes a solution and looks for the close parenthesis that would make it parse""" for i, c in enumerate(st): if c == ")": try: ast.parse("(" + st[:i + 1]) return st[:i].strip() except: pass return None _tokenizer = None # used only in num_tokens def num_tokens(s: str): """Compute the number of tokens according to GPT-3""" global _tokenizer if _tokenizer is None: import transformers _tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2") return len(_tokenizer.tokenize(s)) + s.count("\n\n") # not sure why this is needed but seems to work def prompt_experiment(filename, prefix="", n=1000, temp=0.9, stop="\n", timeout=1.0, cache_only=False, add_docstring=False, seed=0): """ Just run n attempts per puzzle n is the number of attempts per puzzle temp is like 0.9 timeout is judging timeout cache_only means do not call GPT-3 LM but instead insist on loading from cache seed makes it so that you can run the experiment more than once without hitting the cache again. 
returns a list of (f, correct answers) for each puzzle string f where correct answers is a list of (string, index found) """ if seed == 0: seed = None print("=" * 100) print(f"Running prompt experiment with {num_tokens(prefix)} prefix tokens, add_docstring={add_docstring}") for k in locals().copy(): print(f"param {k}: {json.dumps(locals()[k])[:100]}") time0 = time.time() puzzles = load_puzzles(filename, add_docstring) prefix = re.sub(r" +$", "", (prefix or "").lstrip(), flags=re.M) # delete leading/trailing whitespace on each line prompts = get_prompts(prefix, [f for ft, f in puzzles]) successes = [] for p_num, ((ft, f), prompt) in tqdm(enumerate(zip(puzzles, prompts)), total=len(puzzles)): res = gpt3_lib.query(prompt=prompt, temp=temp, n=n, stop=stop, cache_only=cache_only, notes=seed) assert len(res) == n # print(i, "-" * 80) # print(f) # print() valids = [(find_close_paren(g), i) for i, g in enumerate(res)] valids = [(g, i) for (g, i) in valids if g is not None] # double parentheses are necessary to avoid cheating where it changes default parameters :-) results = judge.judge_parallel([f"{ft}\n\nassert True == f(({g}))" for g, _i in valids], timeout=timeout) curr = [(g, i) for (g, i), res in zip(valids, results) if res] successes.append((f, curr)) if curr: ans1 = [a for a, _i in curr] # if verbose: # print(p_num, "-" * 80) # print(strip_param_annotations(f)) # summary = [(a if c == 1 else f"{a} ({c} times)") for a, c in Counter(ans1).most_common(10)] # print(f"{len(curr)} sols, first at attempt #{curr[0][1]}:: {' | '.join(summary)}"[:200]) n_sol = sum(bool(s) for f, s in successes) n_suc = sum(len(s) for f, s in successes) print(f"Solved {n_sol:,}/{len(puzzles):,} puzzles with a total of {n_suc:,} solutions.") print() return successes def bootstrap(filename, iterations, ppi=32, temp=0.9, stop="\n", seed=0, timeout=1.0, max_tokens=2048, gen_tokens=150, verbose=False, cache_only=False): """ Run the bootstrapping experiment iterations is the number of iterations ppi is the number of attempts per puzzle per iteration stop is the token to stop generating on seed is the seed of the random number generator temp is like 0.9 timeout is judging timeout max_tokens is the maximum number of tokens allowed in a prompt gen_tokens is how many tokens to generate at most cache_only means do not call GPT-3 LM but instead insist on loading from cache returns a list of (num_gen, i, f, a) for each solved puzzle where f is the puzzle, i is the index of the puzzle, a is the answer, and num_gen is number of attempts before generation (0-indexed) """ print("=" * 100) print(f"Running GPT3-bootstrap experiment") for k in locals().copy(): print(f"param {k}: {json.dumps(locals()[k])[:100]}") time0 = time.time() rand = random.Random(seed) def get_prompt(f, sep="\n\n---\n\n"): nonlocal solutions, rand, max_tokens, gen_tokens cur = f"{f}\n\nassert True == f(" if not solutions: return cur s2 = solutions[:] rand.shuffle(s2) entries = [f"{f}\n\nassert True == f({g})" for (_it, _i, f, g) in s2] assert f not in "".join(entries) last_prompt = None for k in range(len(entries) + 1): prompt = sep.join([e.replace(' f(', f' f{i + 1}(') for i, e in enumerate(entries[:k] + [cur])]) # print(k, num_tokens(prompt)) if num_tokens(prompt) >= max_tokens - gen_tokens: # print("TOO MANY TOKENS", num_tokens(prompt), num_tokens(last_prompt)) break last_prompt = prompt return last_prompt puzzles = load_puzzles(filename) assert len(puzzles) == len({f for ft, f in puzzles}), "Duplicate puzzles" time0 = time.time() tot = 0 stats = [dict(f=f, 
ft=ft, gs=[], i=i, raw=[]) for i, (ft, f) in enumerate(puzzles)] solved_by_iteration = [] solutions = [] for it in tqdm(range(iterations)): it_time0 = time.time() solved_by_iteration.append(0) rand.shuffle(stats) # do each iteration in a random order for count, s in enumerate(stats): if s["gs"]: continue # already solved i = s["i"] f = s["f"] ft = s["ft"] prompt = get_prompt(f) num_solved = sum(bool(s['gs']) for s in stats) if verbose: if count == 0: print("Prompt:" + ":" * 80) print(prompt) if count % 10 == 0: print(f" * It {it}/{iterations} ({count / len(stats):.0%}) puzzle {i} " f"solved {num_solved} temp={temp}", flush=True) candidates = gpt3_lib.query( prompt=prompt, n=ppi, temp=temp, max_tokens=gen_tokens, stop=stop, cache_only=cache_only, notes=(seed, it) ) s["raw"].append((prompt, candidates)) assert len(candidates) == ppi close_parens = [z for z in [(find_close_paren(s), j) for j, s in enumerate(candidates)] if z[0] is not None] # print("Judging:") # for g, _i in close_parens: # print(f"`{ft}\n\n_ans_=({g})\nassert True == f(_ans_)`".replace("\n", "\\n")) results = judge.judge_parallel([f"{ft}\n\n_ans_=({g})\nassert True == f(_ans_)" for g, _i in close_parens], timeout=timeout) if any(results): a, j = next((a, j) for ((a, j), res) in zip(close_parens, results) if res) solutions.append((it*ppi + j, i, f, a)) solved_by_iteration[-1] += 1 s["gs"].append((a, it, j)) if verbose: print(prompt) print(f"YAY it {it}, puzzle {s['i']}") print(f) print(a) it += 1 num_solved = sum(bool(s['gs']) for s in stats) assert sum(solved_by_iteration) == num_solved if verbose: print() print() print(f"+++ Iter {it}: {num_solved} solved {solved_by_iteration}") print(f"+++ {time.time() - it_time0:.1f}s it ({time.time() - time0:.1f}s)") print() print() print(f"Solved {len(solutions):,}/{len(puzzles):,} puzzles") return sorted(solutions) # sols[2][-18] [('[] + [1] * 20', 218)]) # sols[2][2] # [('23', 53), # ('24', 110), # ('23', 174), # ('24', 233), # ('23', 251), # ('23', 332), # ('23', 336), # ('24', 404), # ('24', 410), # ('23', 454), # ('23', 456), # ('23', 605), # ('23', 697)])
34.126246
120
0.545074
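Finally, a hedged invocation sketch for the two drivers in the lm_solve run.py above. The puzzle-file path is a placeholder, the import path follows the repo layout shown in the metadata (solvers/gpt3/lm_solve/run.py), and cache_only=True assumes the GPT-3 responses have already been cached, so no API calls are made.

from lm_solve.run import prompt_experiment, bootstrap  # package layout per the repo path above

PUZZLES = "puzzles/puzzles.json"  # placeholder path to the puzzles JSON file

# Plain prompting: n attempts per puzzle with an empty prefix.
successes = prompt_experiment(PUZZLES, prefix="", n=128, temp=0.9, cache_only=True)
solved = [f for f, answers in successes if answers]
print(f"solved {len(solved)} of {len(successes)} puzzles")

# Bootstrapping: 10 iterations of 32 attempts per unsolved puzzle,
# reusing earlier solutions as few-shot examples in the prompt.
solutions = bootstrap(PUZZLES, iterations=10, ppi=32, temp=0.9, cache_only=True)
print(f"bootstrap solved {len(solutions)} puzzles")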